1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static const char *shift_op (rtx, HOST_WIDE_INT *);
86 static struct machine_function *arm_init_machine_status (void);
87 static void thumb_exit (FILE *, int);
88 static rtx is_jump_table (rtx);
89 static HOST_WIDE_INT get_jump_table_size (rtx);
90 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
91 static Mnode *add_minipool_forward_ref (Mfix *);
92 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_backward_ref (Mfix *);
94 static void assign_minipool_offsets (Mfix *);
95 static void arm_print_value (FILE *, rtx);
96 static void dump_minipool (rtx);
97 static int arm_barrier_cost (rtx);
98 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
99 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
100 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
101 rtx);
102 static void arm_reorg (void);
103 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
104 static int current_file_function_operand (rtx);
105 static unsigned long arm_compute_save_reg0_reg12_mask (void);
106 static unsigned long arm_compute_save_reg_mask (void);
107 static unsigned long arm_isr_value (tree);
108 static unsigned long arm_compute_func_type (void);
109 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
110 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
111 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
112 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
113 #endif
114 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
115 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static int arm_comp_type_attributes (tree, tree);
118 static void arm_set_default_type_attributes (tree);
119 static int arm_adjust_cost (rtx, rtx, rtx, int);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_size_rtx_costs (rtx, int, int, int *);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
144 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
145 tree, bool);
147 #ifdef OBJECT_FORMAT_ELF
148 static void arm_elf_asm_constructor (rtx, int);
149 #endif
150 #ifndef ARM_PE
151 static void arm_encode_section_info (tree, rtx, int);
152 #endif
154 static void arm_file_end (void);
156 #ifdef AOF_ASSEMBLER
157 static void aof_globalize_label (FILE *, const char *);
158 static void aof_dump_imports (FILE *);
159 static void aof_dump_pic_table (FILE *);
160 static void aof_file_start (void);
161 static void aof_file_end (void);
162 #endif
163 static rtx arm_struct_value_rtx (tree, int);
164 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
165 tree, int *, int);
166 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
167 enum machine_mode, tree, bool);
168 static bool arm_promote_prototypes (tree);
169 static bool arm_default_short_enums (void);
170 static bool arm_align_anon_bitfield (void);
171 static bool arm_return_in_msb (tree);
172 static bool arm_must_pass_in_stack (enum machine_mode, tree);
174 static tree arm_cxx_guard_type (void);
175 static bool arm_cxx_guard_mask_bit (void);
176 static tree arm_get_cookie_size (tree);
177 static bool arm_cookie_has_size (void);
178 static bool arm_cxx_cdtor_returns_this (void);
179 static bool arm_cxx_key_method_may_be_inline (void);
180 static void arm_cxx_determine_class_data_visibility (tree);
181 static bool arm_cxx_class_data_always_comdat (void);
182 static bool arm_cxx_use_aeabi_atexit (void);
183 static void arm_init_libfuncs (void);
184 static bool arm_handle_option (size_t, const char *, int);
185 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
187 /* Initialize the GCC target structure. */
188 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
189 #undef TARGET_MERGE_DECL_ATTRIBUTES
190 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
191 #endif
193 #undef TARGET_ATTRIBUTE_TABLE
194 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
196 #undef TARGET_ASM_FILE_END
197 #define TARGET_ASM_FILE_END arm_file_end
199 #ifdef AOF_ASSEMBLER
200 #undef TARGET_ASM_BYTE_OP
201 #define TARGET_ASM_BYTE_OP "\tDCB\t"
202 #undef TARGET_ASM_ALIGNED_HI_OP
203 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
204 #undef TARGET_ASM_ALIGNED_SI_OP
205 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
206 #undef TARGET_ASM_GLOBALIZE_LABEL
207 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
208 #undef TARGET_ASM_FILE_START
209 #define TARGET_ASM_FILE_START aof_file_start
210 #undef TARGET_ASM_FILE_END
211 #define TARGET_ASM_FILE_END aof_file_end
212 #else
213 #undef TARGET_ASM_ALIGNED_SI_OP
214 #define TARGET_ASM_ALIGNED_SI_OP NULL
215 #undef TARGET_ASM_INTEGER
216 #define TARGET_ASM_INTEGER arm_assemble_integer
217 #endif
219 #undef TARGET_ASM_FUNCTION_PROLOGUE
220 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
222 #undef TARGET_ASM_FUNCTION_EPILOGUE
223 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
225 #undef TARGET_DEFAULT_TARGET_FLAGS
226 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
227 #undef TARGET_HANDLE_OPTION
228 #define TARGET_HANDLE_OPTION arm_handle_option
230 #undef TARGET_COMP_TYPE_ATTRIBUTES
231 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
233 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
234 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
236 #undef TARGET_SCHED_ADJUST_COST
237 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
239 #undef TARGET_ENCODE_SECTION_INFO
240 #ifdef ARM_PE
241 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
242 #else
243 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
244 #endif
246 #undef TARGET_STRIP_NAME_ENCODING
247 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
249 #undef TARGET_ASM_INTERNAL_LABEL
250 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
252 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
253 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
255 #undef TARGET_ASM_OUTPUT_MI_THUNK
256 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
257 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
258 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
260 /* This will be overridden in arm_override_options. */
261 #undef TARGET_RTX_COSTS
262 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
263 #undef TARGET_ADDRESS_COST
264 #define TARGET_ADDRESS_COST arm_address_cost
266 #undef TARGET_SHIFT_TRUNCATION_MASK
267 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
268 #undef TARGET_VECTOR_MODE_SUPPORTED_P
269 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
271 #undef TARGET_MACHINE_DEPENDENT_REORG
272 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
274 #undef TARGET_INIT_BUILTINS
275 #define TARGET_INIT_BUILTINS arm_init_builtins
276 #undef TARGET_EXPAND_BUILTIN
277 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
279 #undef TARGET_INIT_LIBFUNCS
280 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
282 #undef TARGET_PROMOTE_FUNCTION_ARGS
283 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
284 #undef TARGET_PROMOTE_FUNCTION_RETURN
285 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
286 #undef TARGET_PROMOTE_PROTOTYPES
287 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
288 #undef TARGET_PASS_BY_REFERENCE
289 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
290 #undef TARGET_ARG_PARTIAL_BYTES
291 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
293 #undef TARGET_STRUCT_VALUE_RTX
294 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
296 #undef TARGET_SETUP_INCOMING_VARARGS
297 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
299 #undef TARGET_DEFAULT_SHORT_ENUMS
300 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
302 #undef TARGET_ALIGN_ANON_BITFIELD
303 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
305 #undef TARGET_CXX_GUARD_TYPE
306 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
308 #undef TARGET_CXX_GUARD_MASK_BIT
309 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
311 #undef TARGET_CXX_GET_COOKIE_SIZE
312 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
314 #undef TARGET_CXX_COOKIE_HAS_SIZE
315 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
317 #undef TARGET_CXX_CDTOR_RETURNS_THIS
318 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
320 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
321 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
323 #undef TARGET_CXX_USE_AEABI_ATEXIT
324 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
326 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
327 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
328 arm_cxx_determine_class_data_visibility
330 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
331 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
333 #undef TARGET_RETURN_IN_MSB
334 #define TARGET_RETURN_IN_MSB arm_return_in_msb
336 #undef TARGET_MUST_PASS_IN_STACK
337 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
339 struct gcc_target targetm = TARGET_INITIALIZER;
341 /* Obstack for minipool constant handling. */
342 static struct obstack minipool_obstack;
343 static char * minipool_startobj;
345 /* The maximum number of insns skipped which
346 will be conditionalised if possible. */
347 static int max_insns_skipped = 5;
349 extern FILE * asm_out_file;
351 /* True if we are currently building a constant table. */
352 int making_const_table;
354 /* Define the information needed to generate branch insns. This is
355 stored from the compare operation. */
356 rtx arm_compare_op0, arm_compare_op1;
358 /* The processor for which instructions should be scheduled. */
359 enum processor_type arm_tune = arm_none;
361 /* Which floating point model to use. */
362 enum arm_fp_model arm_fp_model;
364 /* Which floating point hardware is available. */
365 enum fputype arm_fpu_arch;
367 /* Which floating point hardware to schedule for. */
368 enum fputype arm_fpu_tune;
370 /* Whether to use floating point hardware. */
371 enum float_abi_type arm_float_abi;
373 /* Which ABI to use. */
374 enum arm_abi_type arm_abi;
376 /* Set by the -mfpu=... option. */
377 static const char * target_fpu_name = NULL;
379 /* Set by the -mfpe=... option. */
380 static const char * target_fpe_name = NULL;
382 /* Set by the -mfloat-abi=... option. */
383 static const char * target_float_abi_name = NULL;
385 /* Set by the -mabi=... option. */
386 static const char * target_abi_name = NULL;
388 /* Used to parse -mstructure_size_boundary command line option. */
389 static const char * structure_size_string = NULL;
390 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
392 /* Used for Thumb call_via trampolines. */
393 rtx thumb_call_via_label[14];
394 static int thumb_call_reg_needed;
396 /* Bit values used to identify processor capabilities. */
397 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
398 #define FL_ARCH3M (1 << 1) /* Extended multiply */
399 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
400 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
401 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
402 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
403 #define FL_THUMB (1 << 6) /* Thumb aware */
404 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
405 #define FL_STRONG (1 << 8) /* StrongARM */
406 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
407 #define FL_XSCALE (1 << 10) /* XScale */
408 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
409 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
410 media instructions. */
411 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
412 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
413 Note: ARM6 & 7 derivatives only. */
415 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
417 #define FL_FOR_ARCH2 0
418 #define FL_FOR_ARCH3 FL_MODE32
419 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
420 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
421 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
422 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
423 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
424 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
425 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
426 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
427 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
428 #define FL_FOR_ARCH6J FL_FOR_ARCH6
429 #define FL_FOR_ARCH6K FL_FOR_ARCH6
430 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
431 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
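/* As a worked example of how these cumulative masks expand,
   FL_FOR_ARCH5TE unions every capability bit introduced up to ARMv5TE:

     FL_FOR_ARCH5TE = FL_FOR_ARCH5E | FL_THUMB
                    = FL_MODE32 | FL_ARCH3M | FL_ARCH4
                      | FL_ARCH5 | FL_ARCH5E | FL_THUMB
                    = 0x0000027a

   Per-core bits such as FL_CO_PROC, FL_LDSCHED or FL_XSCALE are added by
   the individual entries in arm-cores.def and all_architectures below,
   not by these macros.  */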
433 /* The bits in this mask specify which
434 instructions we are allowed to generate. */
435 static unsigned long insn_flags = 0;
437 /* The bits in this mask specify which instruction scheduling options should
438 be used. */
439 static unsigned long tune_flags = 0;
441 /* The following are used in the arm.md file as equivalents to bits
442 in the above two flag variables. */
444 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
445 int arm_arch3m = 0;
447 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
448 int arm_arch4 = 0;
450 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
451 int arm_arch4t = 0;
453 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
454 int arm_arch5 = 0;
456 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
457 int arm_arch5e = 0;
459 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
460 int arm_arch6 = 0;
462 /* Nonzero if this chip can benefit from load scheduling. */
463 int arm_ld_sched = 0;
465 /* Nonzero if this chip is a StrongARM. */
466 int arm_tune_strongarm = 0;
468 /* Nonzero if this chip is a Cirrus variant. */
469 int arm_arch_cirrus = 0;
471 /* Nonzero if this chip supports Intel Wireless MMX technology. */
472 int arm_arch_iwmmxt = 0;
474 /* Nonzero if this chip is an XScale. */
475 int arm_arch_xscale = 0;
477 /* Nonzero if tuning for XScale */
478 int arm_tune_xscale = 0;
480 /* Nonzero if we want to tune for stores that access the write-buffer.
481 This typically means an ARM6 or ARM7 with MMU or MPU. */
482 int arm_tune_wbuf = 0;
484 /* Nonzero if generating Thumb instructions. */
485 int thumb_code = 0;
487 /* Nonzero if we should define __THUMB_INTERWORK__ in the
488 preprocessor.
489 XXX This is a bit of a hack, it's intended to help work around
490 problems in GLD which doesn't understand that armv5t code is
491 interworking clean. */
492 int arm_cpp_interwork = 0;
494 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
495 must report the mode of the memory reference from PRINT_OPERAND to
496 PRINT_OPERAND_ADDRESS. */
497 enum machine_mode output_memory_reference_mode;
499 /* The register number to be used for the PIC offset register. */
500 static const char * arm_pic_register_string = NULL;
501 int arm_pic_register = INVALID_REGNUM;
503 /* Set to 1 when a return insn is output, this means that the epilogue
504 is not needed. */
505 int return_used_this_function;
507 /* Set to 1 after arm_reorg has started.  Reset to zero at the start of
508 the next function. */
509 static int after_arm_reorg = 0;
511 /* The maximum number of insns to be used when loading a constant. */
512 static int arm_constant_limit = 3;
514 /* For an explanation of these variables, see final_prescan_insn below. */
515 int arm_ccfsm_state;
516 enum arm_cond_code arm_current_cc;
517 rtx arm_target_insn;
518 int arm_target_label;
520 /* The condition codes of the ARM, and the inverse function. */
521 static const char * const arm_condition_codes[] =
523 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
524 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
527 #define streq(string1, string2) (strcmp (string1, string2) == 0)
529 /* Initialization code. */
531 struct processors
533 const char *const name;
534 enum processor_type core;
535 const char *arch;
536 const unsigned long flags;
537 bool (* rtx_costs) (rtx, int, int, int *);
540 /* Not all of these give usefully different compilation alternatives,
541 but there is no simple way of generalizing them. */
542 static const struct processors all_cores[] =
544 /* ARM Cores */
545 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
546 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
547 #include "arm-cores.def"
548 #undef ARM_CORE
549 {NULL, arm_none, NULL, 0, NULL}
552 static const struct processors all_architectures[] =
554 /* ARM Architectures */
555 /* We don't specify rtx_costs here as it will be figured out
556 from the core. */
558 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
559 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
560 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
561 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
562 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
563 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
564 implementations that support it, so we will leave it out for now. */
565 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
566 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
567 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
568 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
569 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
570 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
571 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
572 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
573 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
574 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
575 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
576 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
577 {NULL, arm_none, NULL, 0 , NULL}
580 struct arm_cpu_select
582 const char * string;
583 const char * name;
584 const struct processors * processors;
587 /* This is a magic structure. The 'string' field is magically filled in
588 with a pointer to the value specified by the user on the command line
589 assuming that the user has specified such a value. */
591 static struct arm_cpu_select arm_select[] =
593 /* string name processors */
594 { NULL, "-mcpu=", all_cores },
595 { NULL, "-march=", all_architectures },
596 { NULL, "-mtune=", all_cores }
599 /* Defines representing the indexes into the above table. */
600 #define ARM_OPT_SET_CPU 0
601 #define ARM_OPT_SET_ARCH 1
602 #define ARM_OPT_SET_TUNE 2
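/* Illustrative only (assuming the usual core names provided by
   arm-cores.def): arm_handle_option records the raw option strings, e.g.

     -mcpu=xscale      ->  arm_select[ARM_OPT_SET_CPU].string  = "xscale"
     -march=armv5te    ->  arm_select[ARM_OPT_SET_ARCH].string = "armv5te"
     -mtune=strongarm  ->  arm_select[ARM_OPT_SET_TUNE].string = "strongarm"

   arm_override_options then derives insn_flags from the CPU (or, failing
   that, the architecture) entry, while the tuning target prefers -mtune=,
   then -mcpu=, and lastly the core implied by -march=.  */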
604 /* The name of the preprocessor macro to define for this architecture. */
606 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
608 struct fpu_desc
610 const char * name;
611 enum fputype fpu;
615 /* Available values for -mfpu=. */
617 static const struct fpu_desc all_fpus[] =
619 {"fpa", FPUTYPE_FPA},
620 {"fpe2", FPUTYPE_FPA_EMU2},
621 {"fpe3", FPUTYPE_FPA_EMU2},
622 {"maverick", FPUTYPE_MAVERICK},
623 {"vfp", FPUTYPE_VFP}
627 /* Floating point models used by the different hardware.
628 See fputype in arm.h. */
630 static const enum fputype fp_model_for_fpu[] =
632 /* No FP hardware. */
633 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
634 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
635 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
636 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
637 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
638 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
642 struct float_abi
644 const char * name;
645 enum float_abi_type abi_type;
649 /* Available values for -mfloat-abi=. */
651 static const struct float_abi all_float_abis[] =
653 {"soft", ARM_FLOAT_ABI_SOFT},
654 {"softfp", ARM_FLOAT_ABI_SOFTFP},
655 {"hard", ARM_FLOAT_ABI_HARD}
659 struct abi_name
661 const char *name;
662 enum arm_abi_type abi_type;
666 /* Available values for -mabi=. */
668 static const struct abi_name arm_all_abis[] =
670 {"apcs-gnu", ARM_ABI_APCS},
671 {"atpcs", ARM_ABI_ATPCS},
672 {"aapcs", ARM_ABI_AAPCS},
673 {"iwmmxt", ARM_ABI_IWMMXT}
676 /* Return the number of bits set in VALUE. */
677 static unsigned
678 bit_count (unsigned long value)
680 unsigned long count = 0;
682 while (value)
684 count++;
685 value &= value - 1; /* Clear the least-significant set bit. */
688 return count;
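/* The loop above clears exactly one set bit per iteration: value - 1
   flips the lowest 1 bit and all the zeros below it, so the AND removes
   just that bit.  For example:

     0b101100 -> 0b101000 -> 0b100000 -> 0      bit_count == 3  */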
691 /* Set up library functions unique to ARM. */
693 static void
694 arm_init_libfuncs (void)
696 /* There are no special library functions unless we are using the
697 ARM BPABI. */
698 if (!TARGET_BPABI)
699 return;
701 /* The functions below are described in Section 4 of the "Run-Time
702 ABI for the ARM architecture", Version 1.0. */
704 /* Double-precision floating-point arithmetic. Table 2. */
705 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
706 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
707 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
708 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
709 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
711 /* Double-precision comparisons. Table 3. */
712 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
713 set_optab_libfunc (ne_optab, DFmode, NULL);
714 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
715 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
716 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
717 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
718 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
720 /* Single-precision floating-point arithmetic. Table 4. */
721 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
722 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
723 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
724 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
725 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
727 /* Single-precision comparisons. Table 5. */
728 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
729 set_optab_libfunc (ne_optab, SFmode, NULL);
730 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
731 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
732 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
733 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
734 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
736 /* Floating-point to integer conversions. Table 6. */
737 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
738 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
739 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
740 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
741 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
742 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
743 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
744 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
746 /* Conversions between floating types. Table 7. */
747 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
748 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
750 /* Integer to floating-point conversions. Table 8. */
751 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
752 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
753 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
754 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
755 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
756 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
757 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
758 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
760 /* Long long. Table 9. */
761 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
762 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
763 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
764 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
765 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
766 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
767 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
768 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
770 /* Integer (32/32->32) division. \S 4.3.1. */
771 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
772 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
774 /* The divmod functions are designed so that they can be used for
775 plain division, even though they return both the quotient and the
776 remainder. The quotient is returned in the usual location (i.e.,
777 r0 for SImode, {r0, r1} for DImode), just as would be expected
778 for an ordinary division routine. Because the AAPCS calling
779 conventions specify that all of { r0, r1, r2, r3 } are
780 call-clobbered registers, there is no need to tell the compiler
781 explicitly that those registers are clobbered by these
782 routines. */
783 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
784 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
785 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
786 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
788 /* We don't have mod libcalls. Fortunately gcc knows how to use the
789 divmod libcalls instead. */
790 set_optab_libfunc (smod_optab, DImode, NULL);
791 set_optab_libfunc (umod_optab, DImode, NULL);
792 set_optab_libfunc (smod_optab, SImode, NULL);
793 set_optab_libfunc (umod_optab, SImode, NULL);
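/* Illustrative sketch, not compiled: the AEABI divmod helpers named
   above behave roughly as if they had the hypothetical C prototype
   below, returning the quotient in the first result register (r0) with
   the remainder following it, which is why they can also serve as the
   plain division libcalls registered here.  */
#if 0
typedef struct { int quot; int rem; } idiv_return_t;  /* hypothetical name */
extern idiv_return_t __aeabi_idivmod (int numerator, int denominator);

static int
example_quotient (int a, int b)
{
  /* Only the quotient is consumed; the remainder register is ignored.  */
  return __aeabi_idivmod (a, b).quot;
}
#endif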
796 /* Implement TARGET_HANDLE_OPTION. */
798 static bool
799 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
801 switch (code)
803 case OPT_mabi_:
804 target_abi_name = arg;
805 return true;
807 case OPT_march_:
808 arm_select[1].string = arg;
809 return true;
811 case OPT_mcpu_:
812 arm_select[0].string = arg;
813 return true;
815 case OPT_mfloat_abi_:
816 target_float_abi_name = arg;
817 return true;
819 case OPT_mfp_:
820 case OPT_mfpe_:
821 target_fpe_name = arg;
822 return true;
824 case OPT_mfpu_:
825 target_fpu_name = arg;
826 return true;
828 case OPT_mhard_float:
829 target_float_abi_name = "hard";
830 return true;
832 case OPT_mpic_register_:
833 arm_pic_register_string = arg;
834 return true;
836 case OPT_msoft_float:
837 target_float_abi_name = "soft";
838 return true;
840 case OPT_mstructure_size_boundary_:
841 structure_size_string = arg;
842 return true;
844 case OPT_mtune_:
845 arm_select[2].string = arg;
846 return true;
848 default:
849 return true;
853 /* Fix up any incompatible options that the user has specified.
854 This has now turned into a maze. */
855 void
856 arm_override_options (void)
858 unsigned i;
859 enum processor_type target_arch_cpu = arm_none;
861 /* Set up the flags based on the cpu/architecture selected by the user. */
862 for (i = ARRAY_SIZE (arm_select); i--;)
864 struct arm_cpu_select * ptr = arm_select + i;
866 if (ptr->string != NULL && ptr->string[0] != '\0')
868 const struct processors * sel;
870 for (sel = ptr->processors; sel->name != NULL; sel++)
871 if (streq (ptr->string, sel->name))
873 /* Set the architecture define. */
874 if (i != ARM_OPT_SET_TUNE)
875 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
877 /* Determine the processor core for which we should
878 tune code-generation. */
879 if (/* -mcpu= is a sensible default. */
880 i == ARM_OPT_SET_CPU
881 /* -mtune= overrides -mcpu= and -march=. */
882 || i == ARM_OPT_SET_TUNE)
883 arm_tune = (enum processor_type) (sel - ptr->processors);
885 /* Remember the CPU associated with this architecture.
886 If no other option is used to set the CPU type,
887 we'll use this to guess the most suitable tuning
888 options. */
889 if (i == ARM_OPT_SET_ARCH)
890 target_arch_cpu = sel->core;
892 if (i != ARM_OPT_SET_TUNE)
894 /* If we have been given an architecture and a processor
895 make sure that they are compatible. We only generate
896 a warning though, and we prefer the CPU over the
897 architecture. */
898 if (insn_flags != 0 && (insn_flags ^ sel->flags))
899 warning (0, "switch -mcpu=%s conflicts with -march= switch",
900 ptr->string);
902 insn_flags = sel->flags;
905 break;
908 if (sel->name == NULL)
909 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
913 /* Guess the tuning options from the architecture if necessary. */
914 if (arm_tune == arm_none)
915 arm_tune = target_arch_cpu;
917 /* If the user did not specify a processor, choose one for them. */
918 if (insn_flags == 0)
920 const struct processors * sel;
921 unsigned int sought;
922 enum processor_type cpu;
924 cpu = TARGET_CPU_DEFAULT;
925 if (cpu == arm_none)
927 #ifdef SUBTARGET_CPU_DEFAULT
928 /* Use the subtarget default CPU if none was specified by
929 configure. */
930 cpu = SUBTARGET_CPU_DEFAULT;
931 #endif
932 /* Default to ARM6. */
933 if (cpu == arm_none)
934 cpu = arm6;
936 sel = &all_cores[cpu];
938 insn_flags = sel->flags;
940 /* Now check to see if the user has specified some command line
941 switches that require certain abilities from the cpu.  */
942 sought = 0;
944 if (TARGET_INTERWORK || TARGET_THUMB)
946 sought |= (FL_THUMB | FL_MODE32);
948 /* There are no ARM processors that support both APCS-26 and
949 interworking. Therefore we force FL_MODE26 to be removed
950 from insn_flags here (if it was set), so that the search
951 below will always be able to find a compatible processor. */
952 insn_flags &= ~FL_MODE26;
955 if (sought != 0 && ((sought & insn_flags) != sought))
957 /* Try to locate a CPU type that supports all of the abilities
958 of the default CPU, plus the extra abilities requested by
959 the user. */
960 for (sel = all_cores; sel->name != NULL; sel++)
961 if ((sel->flags & sought) == (sought | insn_flags))
962 break;
964 if (sel->name == NULL)
966 unsigned current_bit_count = 0;
967 const struct processors * best_fit = NULL;
969 /* Ideally we would like to issue an error message here
970 saying that it was not possible to find a CPU compatible
971 with the default CPU, but which also supports the command
972 line options specified by the programmer, and so they
973 ought to use the -mcpu=<name> command line option to
974 override the default CPU type.
976 If we cannot find a cpu that has both the
977 characteristics of the default cpu and the given
978 command line options we scan the array again looking
979 for a best match. */
980 for (sel = all_cores; sel->name != NULL; sel++)
981 if ((sel->flags & sought) == sought)
983 unsigned count;
985 count = bit_count (sel->flags & insn_flags);
987 if (count >= current_bit_count)
989 best_fit = sel;
990 current_bit_count = count;
994 gcc_assert (best_fit);
995 sel = best_fit;
998 insn_flags = sel->flags;
1000 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1001 if (arm_tune == arm_none)
1002 arm_tune = (enum processor_type) (sel - all_cores);
1005 /* The processor for which we should tune should now have been
1006 chosen. */
1007 gcc_assert (arm_tune != arm_none);
1009 tune_flags = all_cores[(int)arm_tune].flags;
1010 if (optimize_size)
1011 targetm.rtx_costs = arm_size_rtx_costs;
1012 else
1013 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
1015 /* Make sure that the processor choice does not conflict with any of the
1016 other command line choices. */
1017 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
1019 warning (0, "target CPU does not support interworking" );
1020 target_flags &= ~MASK_INTERWORK;
1023 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
1025 warning (0, "target CPU does not support THUMB instructions");
1026 target_flags &= ~MASK_THUMB;
1029 if (TARGET_APCS_FRAME && TARGET_THUMB)
1031 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1032 target_flags &= ~MASK_APCS_FRAME;
1035 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1036 from here where no function is being compiled currently. */
1037 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
1038 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1040 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
1041 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1043 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
1044 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1046 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
1048 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1049 target_flags |= MASK_APCS_FRAME;
1052 if (TARGET_POKE_FUNCTION_NAME)
1053 target_flags |= MASK_APCS_FRAME;
1055 if (TARGET_APCS_REENT && flag_pic)
1056 error ("-fpic and -mapcs-reent are incompatible");
1058 if (TARGET_APCS_REENT)
1059 warning (0, "APCS reentrant code not supported. Ignored");
1061 /* If this target is normally configured to use APCS frames, warn if they
1062 are turned off and debugging is turned on. */
1063 if (TARGET_ARM
1064 && write_symbols != NO_DEBUG
1065 && !TARGET_APCS_FRAME
1066 && (TARGET_DEFAULT & MASK_APCS_FRAME))
1067 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1069 /* If stack checking is disabled, we can use r10 as the PIC register,
1070 which keeps r9 available. */
1071 if (flag_pic)
1072 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
1074 if (TARGET_APCS_FLOAT)
1075 warning (0, "passing floating point arguments in fp regs not yet supported");
1077 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1078 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
1079 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
1080 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
1081 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
1082 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
1083 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
1084 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
1085 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
1087 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
1088 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
1089 thumb_code = (TARGET_ARM == 0);
1090 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1091 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1092 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
1094 /* V5 code we generate is completely interworking capable, so we turn off
1095 TARGET_INTERWORK here to avoid many tests later on. */
1097 /* XXX However, we must pass the right pre-processor defines to CPP
1098 or GLD can get confused. This is a hack. */
1099 if (TARGET_INTERWORK)
1100 arm_cpp_interwork = 1;
1102 if (arm_arch5)
1103 target_flags &= ~MASK_INTERWORK;
1105 if (target_abi_name)
1107 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1109 if (streq (arm_all_abis[i].name, target_abi_name))
1111 arm_abi = arm_all_abis[i].abi_type;
1112 break;
1115 if (i == ARRAY_SIZE (arm_all_abis))
1116 error ("invalid ABI option: -mabi=%s", target_abi_name);
1118 else
1119 arm_abi = ARM_DEFAULT_ABI;
1121 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1122 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1124 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1125 error ("iwmmxt abi requires an iwmmxt capable cpu");
1127 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1128 if (target_fpu_name == NULL && target_fpe_name != NULL)
1130 if (streq (target_fpe_name, "2"))
1131 target_fpu_name = "fpe2";
1132 else if (streq (target_fpe_name, "3"))
1133 target_fpu_name = "fpe3";
1134 else
1135 error ("invalid floating point emulation option: -mfpe=%s",
1136 target_fpe_name);
1138 if (target_fpu_name != NULL)
1140 /* The user specified a FPU. */
1141 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1143 if (streq (all_fpus[i].name, target_fpu_name))
1145 arm_fpu_arch = all_fpus[i].fpu;
1146 arm_fpu_tune = arm_fpu_arch;
1147 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1148 break;
1151 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1152 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1154 else
1156 #ifdef FPUTYPE_DEFAULT
1157 /* Use the default if it is specified for this platform. */
1158 arm_fpu_arch = FPUTYPE_DEFAULT;
1159 arm_fpu_tune = FPUTYPE_DEFAULT;
1160 #else
1161 /* Pick one based on CPU type. */
1162 /* ??? Some targets assume FPA is the default.
1163 if ((insn_flags & FL_VFP) != 0)
1164 arm_fpu_arch = FPUTYPE_VFP;
1165 else
1167 if (arm_arch_cirrus)
1168 arm_fpu_arch = FPUTYPE_MAVERICK;
1169 else
1170 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1171 #endif
1172 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1173 arm_fpu_tune = FPUTYPE_FPA;
1174 else
1175 arm_fpu_tune = arm_fpu_arch;
1176 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1177 gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
1180 if (target_float_abi_name != NULL)
1182 /* The user specified a FP ABI. */
1183 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1185 if (streq (all_float_abis[i].name, target_float_abi_name))
1187 arm_float_abi = all_float_abis[i].abi_type;
1188 break;
1191 if (i == ARRAY_SIZE (all_float_abis))
1192 error ("invalid floating point abi: -mfloat-abi=%s",
1193 target_float_abi_name);
1195 else
1196 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1198 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1199 sorry ("-mfloat-abi=hard and VFP");
1201 /* If soft-float is specified then don't use FPU. */
1202 if (TARGET_SOFT_FLOAT)
1203 arm_fpu_arch = FPUTYPE_NONE;
1205 /* For arm2/3 there is no need to do any scheduling if there is only
1206 a floating point emulator, or we are doing software floating-point. */
1207 if ((TARGET_SOFT_FLOAT
1208 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1209 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1210 && (tune_flags & FL_MODE32) == 0)
1211 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1213 /* Override the default structure alignment for AAPCS ABI. */
1214 if (arm_abi == ARM_ABI_AAPCS)
1215 arm_structure_size_boundary = 8;
1217 if (structure_size_string != NULL)
1219 int size = strtol (structure_size_string, NULL, 0);
1221 if (size == 8 || size == 32
1222 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1223 arm_structure_size_boundary = size;
1224 else
1225 warning (0, "structure size boundary can only be set to %s",
1226 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1229 if (arm_pic_register_string != NULL)
1231 int pic_register = decode_reg_name (arm_pic_register_string);
1233 if (!flag_pic)
1234 warning (0, "-mpic-register= is useless without -fpic");
1236 /* Prevent the user from choosing an obviously stupid PIC register. */
1237 else if (pic_register < 0 || call_used_regs[pic_register]
1238 || pic_register == HARD_FRAME_POINTER_REGNUM
1239 || pic_register == STACK_POINTER_REGNUM
1240 || pic_register >= PC_REGNUM)
1241 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1242 else
1243 arm_pic_register = pic_register;
1246 if (TARGET_THUMB && flag_schedule_insns)
1248 /* Don't warn since it's on by default in -O2. */
1249 flag_schedule_insns = 0;
1252 if (optimize_size)
1254 /* There's some dispute as to whether this should be 1 or 2. However,
1255 experiments seem to show that in pathological cases a setting of
1256 1 degrades less severely than a setting of 2. This could change if
1257 other parts of the compiler change their behavior. */
1258 arm_constant_limit = 1;
1260 /* If optimizing for size, bump the number of instructions that we
1261 are prepared to conditionally execute (even on a StrongARM). */
1262 max_insns_skipped = 6;
1264 else
1266 /* For processors with load scheduling, it never costs more than
1267 2 cycles to load a constant, and the load scheduler may well
1268 reduce that to 1. */
1269 if (arm_ld_sched)
1270 arm_constant_limit = 1;
1272 /* On XScale the longer latency of a load makes it more difficult
1273 to achieve a good schedule, so it's faster to synthesize
1274 constants that can be done in two insns. */
1275 if (arm_tune_xscale)
1276 arm_constant_limit = 2;
1278 /* StrongARM has early execution of branches, so a sequence
1279 that is worth skipping is shorter. */
1280 if (arm_tune_strongarm)
1281 max_insns_skipped = 3;
1284 /* Register global variables with the garbage collector. */
1285 arm_add_gc_roots ();
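/* A few concrete consequences of the option juggling above, assuming the
   usual core names from arm-cores.def (illustrative, not exhaustive):

     -march=armv3 -mcpu=arm7tdmi   the flag sets differ, so a
                                   "conflicts with -march=" warning is
                                   issued and the -mcpu= flags win;
     -Os                           arm_constant_limit = 1 and
                                   max_insns_skipped = 6;
     -mcpu=xscale (not -Os)        arm_constant_limit ends up as 2.  */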
1288 static void
1289 arm_add_gc_roots (void)
1291 gcc_obstack_init(&minipool_obstack);
1292 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1295 /* A table of known ARM exception types.
1296 For use with the interrupt function attribute. */
1298 typedef struct
1300 const char *const arg;
1301 const unsigned long return_value;
1303 isr_attribute_arg;
1305 static const isr_attribute_arg isr_attribute_args [] =
1307 { "IRQ", ARM_FT_ISR },
1308 { "irq", ARM_FT_ISR },
1309 { "FIQ", ARM_FT_FIQ },
1310 { "fiq", ARM_FT_FIQ },
1311 { "ABORT", ARM_FT_ISR },
1312 { "abort", ARM_FT_ISR },
1313 { "ABORT", ARM_FT_ISR },
1314 { "abort", ARM_FT_ISR },
1315 { "UNDEF", ARM_FT_EXCEPTION },
1316 { "undef", ARM_FT_EXCEPTION },
1317 { "SWI", ARM_FT_EXCEPTION },
1318 { "swi", ARM_FT_EXCEPTION },
1319 { NULL, ARM_FT_NORMAL }
1322 /* Returns the (interrupt) function type of the current
1323 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1325 static unsigned long
1326 arm_isr_value (tree argument)
1328 const isr_attribute_arg * ptr;
1329 const char * arg;
1331 /* No argument - default to IRQ. */
1332 if (argument == NULL_TREE)
1333 return ARM_FT_ISR;
1335 /* Get the value of the argument. */
1336 if (TREE_VALUE (argument) == NULL_TREE
1337 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1338 return ARM_FT_UNKNOWN;
1340 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1342 /* Check it against the list of known arguments. */
1343 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1344 if (streq (arg, ptr->arg))
1345 return ptr->return_value;
1347 /* An unrecognized interrupt type. */
1348 return ARM_FT_UNKNOWN;
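/* For example, a handler declared as

     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   arrives here with ARGUMENT holding the STRING_CST "IRQ" and is
   classified as ARM_FT_ISR; a bare __attribute__ ((interrupt)) has no
   argument and defaults to ARM_FT_ISR as well.  */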
1351 /* Computes the type of the current function. */
1353 static unsigned long
1354 arm_compute_func_type (void)
1356 unsigned long type = ARM_FT_UNKNOWN;
1357 tree a;
1358 tree attr;
1360 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
1362 /* Decide if the current function is volatile. Such functions
1363 never return, and many memory cycles can be saved by not storing
1364 register values that will never be needed again. This optimization
1365 was added to speed up context switching in a kernel application. */
1366 if (optimize > 0
1367 && TREE_NOTHROW (current_function_decl)
1368 && TREE_THIS_VOLATILE (current_function_decl))
1369 type |= ARM_FT_VOLATILE;
1371 if (cfun->static_chain_decl != NULL)
1372 type |= ARM_FT_NESTED;
1374 attr = DECL_ATTRIBUTES (current_function_decl);
1376 a = lookup_attribute ("naked", attr);
1377 if (a != NULL_TREE)
1378 type |= ARM_FT_NAKED;
1380 a = lookup_attribute ("isr", attr);
1381 if (a == NULL_TREE)
1382 a = lookup_attribute ("interrupt", attr);
1384 if (a == NULL_TREE)
1385 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1386 else
1387 type |= arm_isr_value (TREE_VALUE (a));
1389 return type;
1392 /* Returns the type of the current function. */
1394 unsigned long
1395 arm_current_func_type (void)
1397 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1398 cfun->machine->func_type = arm_compute_func_type ();
1400 return cfun->machine->func_type;
1403 /* Return 1 if it is possible to return using a single instruction.
1404 If SIBLING is non-null, this is a test for a return before a sibling
1405 call. SIBLING is the call insn, so we can examine its register usage. */
1408 use_return_insn (int iscond, rtx sibling)
1410 int regno;
1411 unsigned int func_type;
1412 unsigned long saved_int_regs;
1413 unsigned HOST_WIDE_INT stack_adjust;
1414 arm_stack_offsets *offsets;
1416 /* Never use a return instruction before reload has run. */
1417 if (!reload_completed)
1418 return 0;
1420 func_type = arm_current_func_type ();
1422 /* Naked functions and volatile functions need special
1423 consideration. */
1424 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1425 return 0;
1427 /* So do interrupt functions that use the frame pointer. */
1428 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1429 return 0;
1431 offsets = arm_get_frame_offsets ();
1432 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1434 /* As do variadic functions. */
1435 if (current_function_pretend_args_size
1436 || cfun->machine->uses_anonymous_args
1437 /* Or if the function calls __builtin_eh_return () */
1438 || current_function_calls_eh_return
1439 /* Or if the function calls alloca */
1440 || current_function_calls_alloca
1441 /* Or if there is a stack adjustment. However, if the stack pointer
1442 is saved on the stack, we can use a pre-incrementing stack load. */
1443 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1444 return 0;
1446 saved_int_regs = arm_compute_save_reg_mask ();
1448 /* Unfortunately, the insn
1450 ldmib sp, {..., sp, ...}
1452 triggers a bug on most SA-110 based devices, such that the stack
1453 pointer won't be correctly restored if the instruction takes a
1454 page fault. We work around this problem by popping r3 along with
1455 the other registers, since that is never slower than executing
1456 another instruction.
1458 We test for !arm_arch5 here, because code for any architecture
1459 less than this could potentially be run on one of the buggy
1460 chips. */
1461 if (stack_adjust == 4 && !arm_arch5)
1463 /* Validate that r3 is a call-clobbered register (always true in
1464 the default abi) ... */
1465 if (!call_used_regs[3])
1466 return 0;
1468 /* ... that it isn't being used for a return value (always true
1469 until we implement return-in-regs), or for a tail-call
1470 argument ... */
1471 if (sibling)
1473 gcc_assert (GET_CODE (sibling) == CALL_INSN);
1475 if (find_regno_fusage (sibling, USE, 3))
1476 return 0;
1479 /* ... and that there are no call-saved registers in r0-r2
1480 (always true in the default ABI). */
1481 if (saved_int_regs & 0x7)
1482 return 0;
1485 /* Can't be done if interworking with Thumb, and any registers have been
1486 stacked. */
1487 if (TARGET_INTERWORK && saved_int_regs != 0)
1488 return 0;
1490 /* On StrongARM, conditional returns are expensive if they aren't
1491 taken and multiple registers have been stacked. */
1492 if (iscond && arm_tune_strongarm)
1494 /* Conditional return when just the LR is stored is a simple
1495 conditional-load instruction, that's not expensive. */
1496 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1497 return 0;
1499 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1500 return 0;
1503 /* If there are saved registers but the LR isn't saved, then we need
1504 two instructions for the return. */
1505 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1506 return 0;
1508 /* Can't be done if any of the FPA regs are pushed,
1509 since this also requires an insn. */
1510 if (TARGET_HARD_FLOAT && TARGET_FPA)
1511 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1512 if (regs_ever_live[regno] && !call_used_regs[regno])
1513 return 0;
1515 /* Likewise VFP regs. */
1516 if (TARGET_HARD_FLOAT && TARGET_VFP)
1517 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1518 if (regs_ever_live[regno] && !call_used_regs[regno])
1519 return 0;
1521 if (TARGET_REALLY_IWMMXT)
1522 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1523 if (regs_ever_live[regno] && ! call_used_regs [regno])
1524 return 0;
1526 return 1;
1529 /* Return TRUE if int I is a valid immediate ARM constant. */
1532 const_ok_for_arm (HOST_WIDE_INT i)
1534 int lowbit;
1536 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1537 be all zero, or all one. */
1538 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1539 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1540 != ((~(unsigned HOST_WIDE_INT) 0)
1541 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1542 return FALSE;
1544 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
1546 /* Fast return for 0 and small values. We must do this for zero, since
1547 the code below can't handle that one case. */
1548 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
1549 return TRUE;
1551 /* Get the number of trailing zeros, rounded down to the nearest even
1552 number. */
1553 lowbit = (ffs ((int) i) - 1) & ~1;
1555 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
1556 return TRUE;
1557 else if (lowbit <= 4
1558 && ((i & ~0xc000003f) == 0
1559 || (i & ~0xf000000f) == 0
1560 || (i & ~0xfc000003) == 0))
1561 return TRUE;
1563 return FALSE;
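/* Examples of the rule implemented above (an 8-bit value rotated right
   by an even amount, plus the wrap-around patterns handled by the final
   test):

     0x000000ff  valid     0x00000ff0  valid     0x0003fc00  valid
     0xf000000f  valid (wraps past bit 31)
     0x00000101  invalid (needs 9 contiguous bits)
     0xffffff00  invalid (though ~value is; see const_ok_for_op below)  */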
1566 /* Return true if I is a valid constant for the operation CODE. */
1567 static int
1568 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1570 if (const_ok_for_arm (i))
1571 return 1;
1573 switch (code)
1575 case PLUS:
1576 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1578 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1579 case XOR:
1580 case IOR:
1581 return 0;
1583 case AND:
1584 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1586 default:
1587 gcc_unreachable ();
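/* For instance, const_ok_for_op (PLUS, -1) succeeds because adding -1
   can be emitted as "sub rD, rN, #1", and const_ok_for_op (AND,
   0xffffff00) succeeds because the inverted constant 0xff fits
   "bic rD, rN, #255", even though neither value is itself a legal ARM
   immediate.  */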
1591 /* Emit a sequence of insns to handle a large constant.
1592 CODE is the code of the operation required, it can be any of SET, PLUS,
1593 IOR, AND, XOR, MINUS;
1594 MODE is the mode in which the operation is being performed;
1595 VAL is the integer to operate on;
1596 SOURCE is the other operand (a register, or a null-pointer for SET);
1597 SUBTARGETS means it is safe to create scratch registers if that will
1598 either produce a simpler sequence, or we will want to cse the values.
1599 Return value is the number of insns emitted. */
1602 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1603 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1605 rtx cond;
1607 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1608 cond = COND_EXEC_TEST (PATTERN (insn));
1609 else
1610 cond = NULL_RTX;
1612 if (subtargets || code == SET
1613 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1614 && REGNO (target) != REGNO (source)))
1616 /* After arm_reorg has been called, we can't fix up expensive
1617 constants by pushing them into memory so we must synthesize
1618 them in-line, regardless of the cost. This is only likely to
1619 be more costly on chips that have load delay slots and we are
1620 compiling without running the scheduler (so no splitting
1621 occurred before the final instruction emission).
1623 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1625 if (!after_arm_reorg
1626 && !cond
1627 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1628 1, 0)
1629 > arm_constant_limit + (code != SET)))
1631 if (code == SET)
1633 /* Currently SET is the only monadic value for CODE, all
1634 the rest are dyadic. */
1635 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1636 return 1;
1638 else
1640 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1642 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1643 /* For MINUS, the value is subtracted from, since we never
1644 have subtraction of a constant. */
1645 if (code == MINUS)
1646 emit_insn (gen_rtx_SET (VOIDmode, target,
1647 gen_rtx_MINUS (mode, temp, source)));
1648 else
1649 emit_insn (gen_rtx_SET (VOIDmode, target,
1650 gen_rtx_fmt_ee (code, mode, source, temp)));
1651 return 2;
1656 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1660 static int
1661 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1663 HOST_WIDE_INT temp1;
1664 int num_insns = 0;
1667 int end;
1669 if (i <= 0)
1670 i += 32;
1671 if (remainder & (3 << (i - 2)))
1673 end = i - 8;
1674 if (end < 0)
1675 end += 32;
1676 temp1 = remainder & ((0x0ff << end)
1677 | ((i < end) ? (0xff >> (32 - end)) : 0));
1678 remainder &= ~temp1;
1679 num_insns++;
1680 i -= 6;
1682 i -= 2;
1683 } while (remainder);
1684 return num_insns;
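/* Roughly speaking, the loop above charges one instruction per 8-bit
   chunk (at an even bit position) that is still set in the remainder; a
   worst-case SET of a constant such as 0x12345678 therefore counts four
   instructions, matching the mov/orr/orr/orr sequence the generator
   would emit for it.  */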
1687 /* Emit an instruction with the indicated PATTERN. If COND is
1688 non-NULL, conditionalize the execution of the instruction on COND
1689 being true. */
1691 static void
1692 emit_constant_insn (rtx cond, rtx pattern)
1694 if (cond)
1695 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1696 emit_insn (pattern);
1699 /* As above, but extra parameter GENERATE which, if clear, suppresses
1700 RTL generation. */
1702 static int
1703 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1704 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1705 int generate)
1707 int can_invert = 0;
1708 int can_negate = 0;
1709 int can_negate_initial = 0;
1710 int can_shift = 0;
1711 int i;
1712 int num_bits_set = 0;
1713 int set_sign_bit_copies = 0;
1714 int clear_sign_bit_copies = 0;
1715 int clear_zero_bit_copies = 0;
1716 int set_zero_bit_copies = 0;
1717 int insns = 0;
1718 unsigned HOST_WIDE_INT temp1, temp2;
1719 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1721 /* Find out which operations are safe for a given CODE. Also do a quick
1722 check for degenerate cases; these can occur when DImode operations
1723 are split. */
1724 switch (code)
1726 case SET:
1727 can_invert = 1;
1728 can_shift = 1;
1729 can_negate = 1;
1730 break;
1732 case PLUS:
1733 can_negate = 1;
1734 can_negate_initial = 1;
1735 break;
1737 case IOR:
1738 if (remainder == 0xffffffff)
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 GEN_INT (ARM_SIGN_EXTEND (val))));
1744 return 1;
1746 if (remainder == 0)
1748 if (reload_completed && rtx_equal_p (target, source))
1749 return 0;
1750 if (generate)
1751 emit_constant_insn (cond,
1752 gen_rtx_SET (VOIDmode, target, source));
1753 return 1;
1755 break;
1757 case AND:
1758 if (remainder == 0)
1760 if (generate)
1761 emit_constant_insn (cond,
1762 gen_rtx_SET (VOIDmode, target, const0_rtx));
1763 return 1;
1765 if (remainder == 0xffffffff)
1767 if (reload_completed && rtx_equal_p (target, source))
1768 return 0;
1769 if (generate)
1770 emit_constant_insn (cond,
1771 gen_rtx_SET (VOIDmode, target, source));
1772 return 1;
1774 can_invert = 1;
1775 break;
1777 case XOR:
1778 if (remainder == 0)
1780 if (reload_completed && rtx_equal_p (target, source))
1781 return 0;
1782 if (generate)
1783 emit_constant_insn (cond,
1784 gen_rtx_SET (VOIDmode, target, source));
1785 return 1;
1788 /* We don't know how to handle other cases yet. */
1789 gcc_assert (remainder == 0xffffffff);
1791 if (generate)
1792 emit_constant_insn (cond,
1793 gen_rtx_SET (VOIDmode, target,
1794 gen_rtx_NOT (mode, source)));
1795 return 1;
1797 case MINUS:
1798 /* We treat MINUS as (val - source), since (source - val) is always
1799 passed as (source + (-val)). */
1800 if (remainder == 0)
1802 if (generate)
1803 emit_constant_insn (cond,
1804 gen_rtx_SET (VOIDmode, target,
1805 gen_rtx_NEG (mode, source)));
1806 return 1;
1808 if (const_ok_for_arm (val))
1810 if (generate)
1811 emit_constant_insn (cond,
1812 gen_rtx_SET (VOIDmode, target,
1813 gen_rtx_MINUS (mode, GEN_INT (val),
1814 source)));
1815 return 1;
1817 can_negate = 1;
1819 break;
1821 default:
1822 gcc_unreachable ();
1825 /* If we can do it in one insn get out quickly. */
1826 if (const_ok_for_arm (val)
1827 || (can_negate_initial && const_ok_for_arm (-val))
1828 || (can_invert && const_ok_for_arm (~val)))
1830 if (generate)
1831 emit_constant_insn (cond,
1832 gen_rtx_SET (VOIDmode, target,
1833 (source
1834 ? gen_rtx_fmt_ee (code, mode, source,
1835 GEN_INT (val))
1836 : GEN_INT (val))));
1837 return 1;
1840 /* Calculate a few attributes that may be useful for specific
1841 optimizations. */
1842 for (i = 31; i >= 0; i--)
1844 if ((remainder & (1 << i)) == 0)
1845 clear_sign_bit_copies++;
1846 else
1847 break;
1850 for (i = 31; i >= 0; i--)
1852 if ((remainder & (1 << i)) != 0)
1853 set_sign_bit_copies++;
1854 else
1855 break;
1858 for (i = 0; i <= 31; i++)
1860 if ((remainder & (1 << i)) == 0)
1861 clear_zero_bit_copies++;
1862 else
1863 break;
1866 for (i = 0; i <= 31; i++)
1868 if ((remainder & (1 << i)) != 0)
1869 set_zero_bit_copies++;
1870 else
1871 break;
1874 switch (code)
1876 case SET:
1877 /* See if we can do this by sign_extending a constant that is known
1878 to be negative. This is a good way of doing it, since the shift
1879 may well merge into a subsequent insn. */
1880 if (set_sign_bit_copies > 1)
1882 if (const_ok_for_arm
1883 (temp1 = ARM_SIGN_EXTEND (remainder
1884 << (set_sign_bit_copies - 1))))
1886 if (generate)
1888 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1889 emit_constant_insn (cond,
1890 gen_rtx_SET (VOIDmode, new_src,
1891 GEN_INT (temp1)));
1892 emit_constant_insn (cond,
1893 gen_ashrsi3 (target, new_src,
1894 GEN_INT (set_sign_bit_copies - 1)));
1896 return 2;
1898 /* For an inverted constant, we will need to set the low bits;
1899 these will be shifted out of harm's way. */
1900 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1901 if (const_ok_for_arm (~temp1))
1903 if (generate)
1905 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1906 emit_constant_insn (cond,
1907 gen_rtx_SET (VOIDmode, new_src,
1908 GEN_INT (temp1)));
1909 emit_constant_insn (cond,
1910 gen_ashrsi3 (target, new_src,
1911 GEN_INT (set_sign_bit_copies - 1)));
1913 return 2;
1917 /* See if we can generate this by setting the bottom (or the top)
1918 16 bits, and then shifting these into the other half of the
1919 word. We only look for the simplest cases; to do more would cost
1920 too much. Be careful, however, not to generate this when the
1921 alternative would take fewer insns. */
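/* For example (illustrative), 0x12341234: 0x1234 is not itself a valid
   immediate, so it is synthesized first (two insns) and then ORred with
   a copy of itself shifted left by 16, rather than building the full
   value one 8-bit chunk at a time.  */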
1922 if (val & 0xffff0000)
1924 temp1 = remainder & 0xffff0000;
1925 temp2 = remainder & 0x0000ffff;
1927 /* Overlaps outside this range are best done using other methods. */
1928 for (i = 9; i < 24; i++)
1930 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1931 && !const_ok_for_arm (temp2))
1933 rtx new_src = (subtargets
1934 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1935 : target);
1936 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1937 source, subtargets, generate);
1938 source = new_src;
1939 if (generate)
1940 emit_constant_insn
1941 (cond,
1942 gen_rtx_SET
1943 (VOIDmode, target,
1944 gen_rtx_IOR (mode,
1945 gen_rtx_ASHIFT (mode, source,
1946 GEN_INT (i)),
1947 source)));
1948 return insns + 1;
1952 /* Don't duplicate cases already considered. */
1953 for (i = 17; i < 24; i++)
1955 if (((temp1 | (temp1 >> i)) == remainder)
1956 && !const_ok_for_arm (temp1))
1958 rtx new_src = (subtargets
1959 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1960 : target);
1961 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1962 source, subtargets, generate);
1963 source = new_src;
1964 if (generate)
1965 emit_constant_insn
1966 (cond,
1967 gen_rtx_SET (VOIDmode, target,
1968 gen_rtx_IOR
1969 (mode,
1970 gen_rtx_LSHIFTRT (mode, source,
1971 GEN_INT (i)),
1972 source)));
1973 return insns + 1;
1977 break;
1979 case IOR:
1980 case XOR:
1981 /* If we have IOR or XOR, and the constant can be loaded in a
1982 single instruction, and we can find a temporary to put it in,
1983 then this can be done in two instructions instead of 3-4. */
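/* For instance (illustrative), (IOR reg 0xffffff00): the constant is not
   a valid immediate but its complement 0xff is, so we can emit
        mvn     rT, #255
        orr     rD, rS, rT
   rather than synthesizing 0xffffff00 piecemeal in the destination.  */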
1984 if (subtargets
1985 /* TARGET can't be NULL if SUBTARGETS is 0. */
1986 || (reload_completed && !reg_mentioned_p (target, source)))
1988 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1990 if (generate)
1992 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1994 emit_constant_insn (cond,
1995 gen_rtx_SET (VOIDmode, sub,
1996 GEN_INT (val)));
1997 emit_constant_insn (cond,
1998 gen_rtx_SET (VOIDmode, target,
1999 gen_rtx_fmt_ee (code, mode,
2000 source, sub)));
2002 return 2;
2006 if (code == XOR)
2007 break;
2009 if (set_sign_bit_copies > 8
2010 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2012 if (generate)
2014 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2015 rtx shift = GEN_INT (set_sign_bit_copies);
2017 emit_constant_insn
2018 (cond,
2019 gen_rtx_SET (VOIDmode, sub,
2020 gen_rtx_NOT (mode,
2021 gen_rtx_ASHIFT (mode,
2022 source,
2023 shift))));
2024 emit_constant_insn
2025 (cond,
2026 gen_rtx_SET (VOIDmode, target,
2027 gen_rtx_NOT (mode,
2028 gen_rtx_LSHIFTRT (mode, sub,
2029 shift))));
2031 return 2;
2034 if (set_zero_bit_copies > 8
2035 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2037 if (generate)
2039 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2040 rtx shift = GEN_INT (set_zero_bit_copies);
2042 emit_constant_insn
2043 (cond,
2044 gen_rtx_SET (VOIDmode, sub,
2045 gen_rtx_NOT (mode,
2046 gen_rtx_LSHIFTRT (mode,
2047 source,
2048 shift))));
2049 emit_constant_insn
2050 (cond,
2051 gen_rtx_SET (VOIDmode, target,
2052 gen_rtx_NOT (mode,
2053 gen_rtx_ASHIFT (mode, sub,
2054 shift))));
2056 return 2;
2059 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2061 if (generate)
2063 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2064 emit_constant_insn (cond,
2065 gen_rtx_SET (VOIDmode, sub,
2066 gen_rtx_NOT (mode, source)));
2067 source = sub;
2068 if (subtargets)
2069 sub = gen_reg_rtx (mode);
2070 emit_constant_insn (cond,
2071 gen_rtx_SET (VOIDmode, sub,
2072 gen_rtx_AND (mode, source,
2073 GEN_INT (temp1))));
2074 emit_constant_insn (cond,
2075 gen_rtx_SET (VOIDmode, target,
2076 gen_rtx_NOT (mode, sub)));
2078 return 3;
2080 break;
2082 case AND:
2083 /* See if two shifts will do 2 or more insns' worth of work. */
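/* E.g. (AND reg 0xffff) (illustrative): neither 0xffff nor its complement
   is a valid immediate, but a left shift followed by a logical right
   shift clears the top half in two insns:
        mov     rT, rS, lsl #16
        mov     rD, rT, lsr #16  */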
2084 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2086 HOST_WIDE_INT shift_mask = ((0xffffffff
2087 << (32 - clear_sign_bit_copies))
2088 & 0xffffffff);
2090 if ((remainder | shift_mask) != 0xffffffff)
2092 if (generate)
2094 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2095 insns = arm_gen_constant (AND, mode, cond,
2096 remainder | shift_mask,
2097 new_src, source, subtargets, 1);
2098 source = new_src;
2100 else
2102 rtx targ = subtargets ? NULL_RTX : target;
2103 insns = arm_gen_constant (AND, mode, cond,
2104 remainder | shift_mask,
2105 targ, source, subtargets, 0);
2109 if (generate)
2111 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2112 rtx shift = GEN_INT (clear_sign_bit_copies);
2114 emit_insn (gen_ashlsi3 (new_src, source, shift));
2115 emit_insn (gen_lshrsi3 (target, new_src, shift));
2118 return insns + 2;
2121 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2123 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2125 if ((remainder | shift_mask) != 0xffffffff)
2127 if (generate)
2129 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2131 insns = arm_gen_constant (AND, mode, cond,
2132 remainder | shift_mask,
2133 new_src, source, subtargets, 1);
2134 source = new_src;
2136 else
2138 rtx targ = subtargets ? NULL_RTX : target;
2140 insns = arm_gen_constant (AND, mode, cond,
2141 remainder | shift_mask,
2142 targ, source, subtargets, 0);
2146 if (generate)
2148 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2149 rtx shift = GEN_INT (clear_zero_bit_copies);
2151 emit_insn (gen_lshrsi3 (new_src, source, shift));
2152 emit_insn (gen_ashlsi3 (target, new_src, shift));
2155 return insns + 2;
2158 break;
2160 default:
2161 break;
2164 for (i = 0; i < 32; i++)
2165 if (remainder & (1 << i))
2166 num_bits_set++;
2168 if (code == AND || (can_invert && num_bits_set > 16))
2169 remainder = (~remainder) & 0xffffffff;
2170 else if (code == PLUS && num_bits_set > 16)
2171 remainder = (-remainder) & 0xffffffff;
2172 else
2174 can_invert = 0;
2175 can_negate = 0;
2178 /* Now try and find a way of doing the job in either two or three
2179 instructions.
2180 We start by looking for the largest block of zeros that is aligned on
2181 a 2-bit boundary; we then fill up the temps, wrapping around to the
2182 top of the word when we drop off the bottom.
2183 In the worst case this code should produce no more than four insns. */
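/* Illustrative worst case: with code == SET and val == 0x12345678 there
   is no usable run of zeros, so the loop below emits a MOV of the top
   8-bit chunk followed by three ORRs of the remaining chunks, four
   insns in all.  */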
2185 int best_start = 0;
2186 int best_consecutive_zeros = 0;
2188 for (i = 0; i < 32; i += 2)
2190 int consecutive_zeros = 0;
2192 if (!(remainder & (3 << i)))
2194 while ((i < 32) && !(remainder & (3 << i)))
2196 consecutive_zeros += 2;
2197 i += 2;
2199 if (consecutive_zeros > best_consecutive_zeros)
2201 best_consecutive_zeros = consecutive_zeros;
2202 best_start = i - consecutive_zeros;
2204 i -= 2;
2208 /* So long as it won't require any more insns to do so, it's
2209 desirable to emit a small constant (in bits 0...9) in the last
2210 insn. This way there is more chance that it can be combined with
2211 a later addressing insn to form a pre-indexed load or store
2212 operation. Consider:
2214 *((volatile int *)0xe0000100) = 1;
2215 *((volatile int *)0xe0000110) = 2;
2217 We want this to wind up as:
2219 mov rA, #0xe0000000
2220 mov rB, #1
2221 str rB, [rA, #0x100]
2222 mov rB, #2
2223 str rB, [rA, #0x110]
2225 rather than having to synthesize both large constants from scratch.
2227 Therefore, we calculate how many insns would be required to emit
2228 the constant starting from `best_start', and also starting from
2229 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2230 yield a shorter sequence, we may as well use zero. */
2231 if (best_start != 0
2232 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2233 && (count_insns_for_constant (remainder, 0) <=
2234 count_insns_for_constant (remainder, best_start)))
2235 best_start = 0;
2237 /* Now start emitting the insns. */
2238 i = best_start;
2241 int end;
2243 if (i <= 0)
2244 i += 32;
2245 if (remainder & (3 << (i - 2)))
2247 end = i - 8;
2248 if (end < 0)
2249 end += 32;
2250 temp1 = remainder & ((0x0ff << end)
2251 | ((i < end) ? (0xff >> (32 - end)) : 0));
2252 remainder &= ~temp1;
2254 if (generate)
2256 rtx new_src, temp1_rtx;
2258 if (code == SET || code == MINUS)
2260 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2261 if (can_invert && code != MINUS)
2262 temp1 = ~temp1;
2264 else
2266 if (remainder && subtargets)
2267 new_src = gen_reg_rtx (mode);
2268 else
2269 new_src = target;
2270 if (can_invert)
2271 temp1 = ~temp1;
2272 else if (can_negate)
2273 temp1 = -temp1;
2276 temp1 = trunc_int_for_mode (temp1, mode);
2277 temp1_rtx = GEN_INT (temp1);
2279 if (code == SET)
2281 else if (code == MINUS)
2282 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2283 else
2284 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2286 emit_constant_insn (cond,
2287 gen_rtx_SET (VOIDmode, new_src,
2288 temp1_rtx));
2289 source = new_src;
2292 if (code == SET)
2294 can_invert = 0;
2295 code = PLUS;
2297 else if (code == MINUS)
2298 code = PLUS;
2300 insns++;
2301 i -= 6;
2303 i -= 2;
2305 while (remainder);
2308 return insns;
2311 /* Canonicalize a comparison so that we are more likely to recognize it.
2312 This can be done for a few constant compares, where we can make the
2313 immediate value easier to load. */
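/* For example (illustrative): (LE x 0xfff) would need 0xfff loaded into a
   register before the compare, but 0x1000 is a valid immediate, so the
   comparison is rewritten as (LT x 0x1000) and becomes a single CMP.  */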
2315 enum rtx_code
2316 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2318 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2320 switch (code)
2322 case EQ:
2323 case NE:
2324 return code;
2326 case GT:
2327 case LE:
2328 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2329 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2331 *op1 = GEN_INT (i + 1);
2332 return code == GT ? GE : LT;
2334 break;
2336 case GE:
2337 case LT:
2338 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2339 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2341 *op1 = GEN_INT (i - 1);
2342 return code == GE ? GT : LE;
2344 break;
2346 case GTU:
2347 case LEU:
2348 if (i != ~((unsigned HOST_WIDE_INT) 0)
2349 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2351 *op1 = GEN_INT (i + 1);
2352 return code == GTU ? GEU : LTU;
2354 break;
2356 case GEU:
2357 case LTU:
2358 if (i != 0
2359 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2361 *op1 = GEN_INT (i - 1);
2362 return code == GEU ? GTU : LEU;
2364 break;
2366 default:
2367 gcc_unreachable ();
2370 return code;
2374 /* Define how to find the value returned by a function. */
2377 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2379 enum machine_mode mode;
2380 int unsignedp ATTRIBUTE_UNUSED;
2381 rtx r ATTRIBUTE_UNUSED;
2383 mode = TYPE_MODE (type);
2384 /* Promote integer types. */
2385 if (INTEGRAL_TYPE_P (type))
2386 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2388 /* Promote small structs returned in a register to full-word size
2389 for big-endian AAPCS. */
2390 if (arm_return_in_msb (type))
2392 HOST_WIDE_INT size = int_size_in_bytes (type);
2393 if (size % UNITS_PER_WORD != 0)
2395 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2396 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2400 return LIBCALL_VALUE(mode);
2403 /* Determine the amount of memory needed to store the possible return
2404 registers of an untyped call. */
2406 arm_apply_result_size (void)
2408 int size = 16;
2410 if (TARGET_ARM)
2412 if (TARGET_HARD_FLOAT_ABI)
2414 if (TARGET_FPA)
2415 size += 12;
2416 if (TARGET_MAVERICK)
2417 size += 8;
2419 if (TARGET_IWMMXT_ABI)
2420 size += 8;
2423 return size;
2426 /* Decide whether a type should be returned in memory (true)
2427 or in a register (false). This is called by the macro
2428 RETURN_IN_MEMORY. */
2430 arm_return_in_memory (tree type)
2432 HOST_WIDE_INT size;
2434 if (!AGGREGATE_TYPE_P (type) &&
2435 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2436 /* All simple types are returned in registers.
2437 For AAPCS, complex types are treated the same as aggregates. */
2438 return 0;
2440 size = int_size_in_bytes (type);
2442 if (arm_abi != ARM_ABI_APCS)
2444 /* ATPCS and later return aggregate types in memory only if they are
2445 larger than a word (or are variable size). */
2446 return (size < 0 || size > UNITS_PER_WORD);
2449 /* For the arm-wince targets we choose to be compatible with Microsoft's
2450 ARM and Thumb compilers, which always return aggregates in memory. */
2451 #ifndef ARM_WINCE
2452 /* All structures/unions bigger than one word are returned in memory.
2453 Also catch the case where int_size_in_bytes returns -1. In this case
2454 the aggregate is either huge or of variable size, and in either case
2455 we will want to return it via memory and not in a register. */
2456 if (size < 0 || size > UNITS_PER_WORD)
2457 return 1;
2459 if (TREE_CODE (type) == RECORD_TYPE)
2461 tree field;
2463 /* For a struct the APCS says that we only return in a register
2464 if the type is 'integer like' and every addressable element
2465 has an offset of zero. For practical purposes this means
2466 that the structure can have at most one non bit-field element
2467 and that this element must be the first one in the structure. */
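/* For example (illustrative, APCS rules): struct { char c; } is
   "integer like" and comes back in r0, whereas struct { float f; }
   fails the checks below and is returned in memory.  */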
2469 /* Find the first field, ignoring non FIELD_DECL things which will
2470 have been created by C++. */
2471 for (field = TYPE_FIELDS (type);
2472 field && TREE_CODE (field) != FIELD_DECL;
2473 field = TREE_CHAIN (field))
2474 continue;
2476 if (field == NULL)
2477 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2479 /* Check that the first field is valid for returning in a register. */
2481 /* ... Floats are not allowed */
2482 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2483 return 1;
2485 /* ... Aggregates that are not themselves valid for returning in
2486 a register are not allowed. */
2487 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2488 return 1;
2490 /* Now check the remaining fields, if any. Only bitfields are allowed,
2491 since they are not addressable. */
2492 for (field = TREE_CHAIN (field);
2493 field;
2494 field = TREE_CHAIN (field))
2496 if (TREE_CODE (field) != FIELD_DECL)
2497 continue;
2499 if (!DECL_BIT_FIELD_TYPE (field))
2500 return 1;
2503 return 0;
2506 if (TREE_CODE (type) == UNION_TYPE)
2508 tree field;
2510 /* Unions can be returned in registers if every element is
2511 integral, or can be returned in an integer register. */
2512 for (field = TYPE_FIELDS (type);
2513 field;
2514 field = TREE_CHAIN (field))
2516 if (TREE_CODE (field) != FIELD_DECL)
2517 continue;
2519 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2520 return 1;
2522 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2523 return 1;
2526 return 0;
2528 #endif /* not ARM_WINCE */
2530 /* Return all other types in memory. */
2531 return 1;
2534 /* Indicate whether or not words of a double are in big-endian order. */
2537 arm_float_words_big_endian (void)
2539 if (TARGET_MAVERICK)
2540 return 0;
2542 /* For FPA, float words are always big-endian. For VFP, float words
2543 follow the memory system mode. */
2545 if (TARGET_FPA)
2547 return 1;
2550 if (TARGET_VFP)
2551 return (TARGET_BIG_END ? 1 : 0);
2553 return 1;
2556 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2557 for a call to a function whose data type is FNTYPE.
2558 For a library call, FNTYPE is NULL. */
2559 void
2560 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2561 rtx libname ATTRIBUTE_UNUSED,
2562 tree fndecl ATTRIBUTE_UNUSED)
2564 /* On the ARM, the offset starts at 0. */
2565 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2566 pcum->iwmmxt_nregs = 0;
2567 pcum->can_split = true;
2569 pcum->call_cookie = CALL_NORMAL;
2571 if (TARGET_LONG_CALLS)
2572 pcum->call_cookie = CALL_LONG;
2574 /* Check for long call/short call attributes. The attributes
2575 override any command line option. */
2576 if (fntype)
2578 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2579 pcum->call_cookie = CALL_SHORT;
2580 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2581 pcum->call_cookie = CALL_LONG;
2584 /* Varargs vectors are treated the same as long long.
2585 named_count avoids having to change the way arm handles 'named'. */
2586 pcum->named_count = 0;
2587 pcum->nargs = 0;
2589 if (TARGET_REALLY_IWMMXT && fntype)
2591 tree fn_arg;
2593 for (fn_arg = TYPE_ARG_TYPES (fntype);
2594 fn_arg;
2595 fn_arg = TREE_CHAIN (fn_arg))
2596 pcum->named_count += 1;
2598 if (! pcum->named_count)
2599 pcum->named_count = INT_MAX;
2604 /* Return true if mode/type need doubleword alignment. */
2605 bool
2606 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2608 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2609 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
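/* Example (illustrative, AAPCS): for f (int a, long long b), A is passed
   in r0; B needs doubleword alignment, so r1 is skipped and B occupies
   the even/odd pair r2/r3.  Under the old APCS no register is skipped.  */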
2613 /* Determine where to put an argument to a function.
2614 Value is zero to push the argument on the stack,
2615 or a hard register in which to store the argument.
2617 MODE is the argument's machine mode.
2618 TYPE is the data type of the argument (as a tree).
2619 This is null for libcalls where that information may
2620 not be available.
2621 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2622 the preceding args and about the function being called.
2623 NAMED is nonzero if this argument is a named parameter
2624 (otherwise it is an extra parameter matching an ellipsis). */
2627 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2628 tree type, int named)
2630 int nregs;
2632 /* Varargs vectors are treated the same as long long.
2633 named_count avoids having to change the way arm handles 'named'. */
2634 if (TARGET_IWMMXT_ABI
2635 && arm_vector_mode_supported_p (mode)
2636 && pcum->named_count > pcum->nargs + 1)
2638 if (pcum->iwmmxt_nregs <= 9)
2639 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2640 else
2642 pcum->can_split = false;
2643 return NULL_RTX;
2647 /* Put doubleword aligned quantities in even register pairs. */
2648 if (pcum->nregs & 1
2649 && ARM_DOUBLEWORD_ALIGN
2650 && arm_needs_doubleword_align (mode, type))
2651 pcum->nregs++;
2653 if (mode == VOIDmode)
2654 /* Compute operand 2 of the call insn. */
2655 return GEN_INT (pcum->call_cookie);
2657 /* Only allow splitting an arg between regs and memory if all preceding
2658 args were allocated to regs. For args passed by reference we only count
2659 the reference pointer. */
2660 if (pcum->can_split)
2661 nregs = 1;
2662 else
2663 nregs = ARM_NUM_REGS2 (mode, type);
2665 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2666 return NULL_RTX;
2668 return gen_rtx_REG (mode, pcum->nregs);
2671 static int
2672 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2673 tree type, bool named ATTRIBUTE_UNUSED)
2675 int nregs = pcum->nregs;
2677 if (arm_vector_mode_supported_p (mode))
2678 return 0;
2680 if (NUM_ARG_REGS > nregs
2681 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2682 && pcum->can_split)
2683 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2685 return 0;
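/* For instance (illustrative): if three argument registers are already in
   use and the next argument needs two words, this returns 4, meaning the
   first word is passed in r3 and the remainder goes on the stack.  */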
2688 /* Variable sized types are passed by reference. This is a GCC
2689 extension to the ARM ABI. */
2691 static bool
2692 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2693 enum machine_mode mode ATTRIBUTE_UNUSED,
2694 tree type, bool named ATTRIBUTE_UNUSED)
2696 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
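/* E.g. a parameter whose type is a C99 variable-length array has a
   non-constant TYPE_SIZE and is therefore passed by invisible reference
   (illustrative example; ordinary structs and scalars are unaffected).  */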
2699 /* Encode the current state of the #pragma [no_]long_calls. */
2700 typedef enum
2702 OFF, /* No #pragma [no_]long_calls is in effect. */
2703 LONG, /* #pragma long_calls is in effect. */
2704 SHORT /* #pragma no_long_calls is in effect. */
2705 } arm_pragma_enum;
2707 static arm_pragma_enum arm_pragma_long_calls = OFF;
2709 void
2710 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2712 arm_pragma_long_calls = LONG;
2715 void
2716 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2718 arm_pragma_long_calls = SHORT;
2721 void
2722 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2724 arm_pragma_long_calls = OFF;
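/* Usage sketch (illustrative):

     #pragma long_calls
     void far_func (void);    // type gets __attribute__ ((long_call))
     #pragma no_long_calls
     void near_func (void);   // type gets __attribute__ ((short_call))
     #pragma long_calls_off
     void plain_func (void);  // no call attribute is added

   The state recorded here is consumed by arm_set_default_type_attributes
   below.  */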
2727 /* Table of machine attributes. */
2728 const struct attribute_spec arm_attribute_table[] =
2730 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2731 /* Function calls made to this symbol must be done indirectly, because
2732 it may lie outside of the 26 bit addressing range of a normal function
2733 call. */
2734 { "long_call", 0, 0, false, true, true, NULL },
2735 /* Whereas these functions are always known to reside within the 26 bit
2736 addressing range. */
2737 { "short_call", 0, 0, false, true, true, NULL },
2738 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2739 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2740 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2741 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2742 #ifdef ARM_PE
2743 /* ARM/PE has three new attributes:
2744 interfacearm - ?
2745 dllexport - for exporting a function/variable that will live in a dll
2746 dllimport - for importing a function/variable from a dll
2748 Microsoft allows multiple declspecs in one __declspec, separating
2749 them with spaces. We do NOT support this. Instead, use __declspec
2750 multiple times.
2752 { "dllimport", 0, 0, true, false, false, NULL },
2753 { "dllexport", 0, 0, true, false, false, NULL },
2754 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2755 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2756 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2757 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2758 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2759 #endif
2760 { NULL, 0, 0, false, false, false, NULL }
2763 /* Handle an attribute requiring a FUNCTION_DECL;
2764 arguments as in struct attribute_spec.handler. */
2765 static tree
2766 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2767 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2769 if (TREE_CODE (*node) != FUNCTION_DECL)
2771 warning (0, "%qs attribute only applies to functions",
2772 IDENTIFIER_POINTER (name));
2773 *no_add_attrs = true;
2776 return NULL_TREE;
2779 /* Handle an "interrupt" or "isr" attribute;
2780 arguments as in struct attribute_spec.handler. */
2781 static tree
2782 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2783 bool *no_add_attrs)
2785 if (DECL_P (*node))
2787 if (TREE_CODE (*node) != FUNCTION_DECL)
2789 warning (0, "%qs attribute only applies to functions",
2790 IDENTIFIER_POINTER (name));
2791 *no_add_attrs = true;
2793 /* FIXME: the argument if any is checked for type attributes;
2794 should it be checked for decl ones? */
2796 else
2798 if (TREE_CODE (*node) == FUNCTION_TYPE
2799 || TREE_CODE (*node) == METHOD_TYPE)
2801 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2803 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2804 *no_add_attrs = true;
2807 else if (TREE_CODE (*node) == POINTER_TYPE
2808 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2809 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2810 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2812 *node = build_variant_type_copy (*node);
2813 TREE_TYPE (*node) = build_type_attribute_variant
2814 (TREE_TYPE (*node),
2815 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2816 *no_add_attrs = true;
2818 else
2820 /* Possibly pass this attribute on from the type to a decl. */
2821 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2822 | (int) ATTR_FLAG_FUNCTION_NEXT
2823 | (int) ATTR_FLAG_ARRAY_NEXT))
2825 *no_add_attrs = true;
2826 return tree_cons (name, args, NULL_TREE);
2828 else
2830 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2835 return NULL_TREE;
2838 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2839 /* Handle the "notshared" attribute. This attribute is another way of
2840 requesting hidden visibility. ARM's compiler supports
2841 "__declspec(notshared)"; we support the same thing via an
2842 attribute. */
2844 static tree
2845 arm_handle_notshared_attribute (tree *node,
2846 tree name ATTRIBUTE_UNUSED,
2847 tree args ATTRIBUTE_UNUSED,
2848 int flags ATTRIBUTE_UNUSED,
2849 bool *no_add_attrs)
2851 tree decl = TYPE_NAME (*node);
2853 if (decl)
2855 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2856 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2857 *no_add_attrs = false;
2859 return NULL_TREE;
2861 #endif
2863 /* Return 0 if the attributes for two types are incompatible, 1 if they
2864 are compatible, and 2 if they are nearly compatible (which causes a
2865 warning to be generated). */
2866 static int
2867 arm_comp_type_attributes (tree type1, tree type2)
2869 int l1, l2, s1, s2;
2871 /* Check for mismatch of non-default calling convention. */
2872 if (TREE_CODE (type1) != FUNCTION_TYPE)
2873 return 1;
2875 /* Check for mismatched call attributes. */
2876 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2877 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2878 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2879 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2881 /* Only bother to check if an attribute is defined. */
2882 if (l1 | l2 | s1 | s2)
2884 /* If one type has an attribute, the other must have the same attribute. */
2885 if ((l1 != l2) || (s1 != s2))
2886 return 0;
2888 /* Disallow mixed attributes. */
2889 if ((l1 & s2) || (l2 & s1))
2890 return 0;
2893 /* Check for mismatched ISR attribute. */
2894 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2895 if (! l1)
2896 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2897 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2898 if (! l2)
2899 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2900 if (l1 != l2)
2901 return 0;
2903 return 1;
2906 /* Encode long_call or short_call attribute by prefixing
2907 symbol name in DECL with a special character FLAG. */
2908 void
2909 arm_encode_call_attribute (tree decl, int flag)
2911 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2912 int len = strlen (str);
2913 char * newstr;
2915 /* Do not allow weak functions to be treated as short call. */
2916 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2917 return;
2919 newstr = alloca (len + 2);
2920 newstr[0] = flag;
2921 strcpy (newstr + 1, str);
2923 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2924 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2927 /* Assigns default attributes to newly defined type. This is used to
2928 set short_call/long_call attributes for function types of
2929 functions defined inside corresponding #pragma scopes. */
2930 static void
2931 arm_set_default_type_attributes (tree type)
2933 /* Add __attribute__ ((long_call)) to all functions, when
2934 inside #pragma long_calls or __attribute__ ((short_call)),
2935 when inside #pragma no_long_calls. */
2936 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2938 tree type_attr_list, attr_name;
2939 type_attr_list = TYPE_ATTRIBUTES (type);
2941 if (arm_pragma_long_calls == LONG)
2942 attr_name = get_identifier ("long_call");
2943 else if (arm_pragma_long_calls == SHORT)
2944 attr_name = get_identifier ("short_call");
2945 else
2946 return;
2948 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2949 TYPE_ATTRIBUTES (type) = type_attr_list;
2953 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2954 defined within the current compilation unit. If this cannot be
2955 determined, then 0 is returned. */
2956 static int
2957 current_file_function_operand (rtx sym_ref)
2959 /* This is a bit of a fib. A function will have a short call flag
2960 applied to its name if it has the short call attribute, or it has
2961 already been defined within the current compilation unit. */
2962 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2963 return 1;
2965 /* The current function is always defined within the current compilation
2966 unit. If it is a weak definition, however, then this may not be the real
2967 definition of the function, and so we have to say no. */
2968 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2969 && !DECL_WEAK (current_function_decl))
2970 return 1;
2972 /* We cannot make the determination - default to returning 0. */
2973 return 0;
2976 /* Return nonzero if a 32 bit "long_call" should be generated for
2977 this call. We generate a long_call if the function:
2979 a. has an __attribute__ ((long_call))
2980 or b. is within the scope of a #pragma long_calls
2981 or c. the -mlong-calls command line switch has been specified
2982 . and either:
2983 1. -ffunction-sections is in effect
2984 or 2. the current function has __attribute__ ((section))
2985 or 3. the target function has __attribute__ ((section))
2987 However we do not generate a long call if the function:
2989 d. has an __attribute__ ((short_call))
2990 or e. is inside the scope of a #pragma no_long_calls
2991 or f. is defined within the current compilation unit.
2993 This function will be called by C fragments contained in the machine
2994 description file. SYM_REF and CALL_COOKIE correspond to the matched
2995 rtl operands. CALL_SYMBOL is used to distinguish between
2996 two different callers of the function. It is set to 1 in the
2997 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2998 and "call_value" patterns. This is because of the difference in the
2999 SYM_REFs passed by these patterns. */
3001 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3003 if (!call_symbol)
3005 if (GET_CODE (sym_ref) != MEM)
3006 return 0;
3008 sym_ref = XEXP (sym_ref, 0);
3011 if (GET_CODE (sym_ref) != SYMBOL_REF)
3012 return 0;
3014 if (call_cookie & CALL_SHORT)
3015 return 0;
3017 if (TARGET_LONG_CALLS)
3019 if (flag_function_sections
3020 || DECL_SECTION_NAME (current_function_decl))
3021 /* c.3 is handled by the definition of the
3022 ARM_DECLARE_FUNCTION_SIZE macro. */
3023 return 1;
3026 if (current_file_function_operand (sym_ref))
3027 return 0;
3029 return (call_cookie & CALL_LONG)
3030 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3031 || TARGET_LONG_CALLS;
3034 /* Return nonzero if it is ok to make a tail-call to DECL. */
3035 static bool
3036 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3038 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3040 if (cfun->machine->sibcall_blocked)
3041 return false;
3043 /* Never tailcall something for which we have no decl, or if we
3044 are in Thumb mode. */
3045 if (decl == NULL || TARGET_THUMB)
3046 return false;
3048 /* Get the calling method. */
3049 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3050 call_type = CALL_SHORT;
3051 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3052 call_type = CALL_LONG;
3054 /* Cannot tail-call to long calls, since these are out of range of
3055 a branch instruction. However, if not compiling PIC, we know
3056 we can reach the symbol if it is in this compilation unit. */
3057 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3058 return false;
3060 /* If we are interworking and the function is not declared static
3061 then we can't tail-call it unless we know that it exists in this
3062 compilation unit (since it might be a Thumb routine). */
3063 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3064 return false;
3066 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3067 if (IS_INTERRUPT (arm_current_func_type ()))
3068 return false;
3070 /* Everything else is ok. */
3071 return true;
3075 /* Addressing mode support functions. */
3077 /* Return nonzero if X is a legitimate immediate operand when compiling
3078 for PIC. */
3080 legitimate_pic_operand_p (rtx x)
3082 if (CONSTANT_P (x)
3083 && flag_pic
3084 && (GET_CODE (x) == SYMBOL_REF
3085 || (GET_CODE (x) == CONST
3086 && GET_CODE (XEXP (x, 0)) == PLUS
3087 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
3088 return 0;
3090 return 1;
3094 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3096 if (GET_CODE (orig) == SYMBOL_REF
3097 || GET_CODE (orig) == LABEL_REF)
3099 #ifndef AOF_ASSEMBLER
3100 rtx pic_ref, address;
3101 #endif
3102 rtx insn;
3103 int subregs = 0;
3105 if (reg == 0)
3107 gcc_assert (!no_new_pseudos);
3108 reg = gen_reg_rtx (Pmode);
3110 subregs = 1;
3113 #ifdef AOF_ASSEMBLER
3114 /* The AOF assembler can generate relocations for these directly, and
3115 understands that the PIC register has to be added into the offset. */
3116 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3117 #else
3118 if (subregs)
3119 address = gen_reg_rtx (Pmode);
3120 else
3121 address = reg;
3123 if (TARGET_ARM)
3124 emit_insn (gen_pic_load_addr_arm (address, orig));
3125 else
3126 emit_insn (gen_pic_load_addr_thumb (address, orig));
3128 if ((GET_CODE (orig) == LABEL_REF
3129 || (GET_CODE (orig) == SYMBOL_REF &&
3130 SYMBOL_REF_LOCAL_P (orig)))
3131 && NEED_GOT_RELOC)
3132 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3133 else
3135 pic_ref = gen_const_mem (Pmode,
3136 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3137 address));
3140 insn = emit_move_insn (reg, pic_ref);
3141 #endif
3142 current_function_uses_pic_offset_table = 1;
3143 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3144 by loop. */
3145 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3146 REG_NOTES (insn));
3147 return reg;
3149 else if (GET_CODE (orig) == CONST)
3151 rtx base, offset;
3153 if (GET_CODE (XEXP (orig, 0)) == PLUS
3154 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3155 return orig;
3157 if (reg == 0)
3159 gcc_assert (!no_new_pseudos);
3160 reg = gen_reg_rtx (Pmode);
3163 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3165 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3166 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3167 base == reg ? 0 : reg);
3169 if (GET_CODE (offset) == CONST_INT)
3171 /* The base register doesn't really matter, we only want to
3172 test the index for the appropriate mode. */
3173 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3175 gcc_assert (!no_new_pseudos);
3176 offset = force_reg (Pmode, offset);
3179 if (GET_CODE (offset) == CONST_INT)
3180 return plus_constant (base, INTVAL (offset));
3183 if (GET_MODE_SIZE (mode) > 4
3184 && (GET_MODE_CLASS (mode) == MODE_INT
3185 || TARGET_SOFT_FLOAT))
3187 emit_insn (gen_addsi3 (reg, base, offset));
3188 return reg;
3191 return gen_rtx_PLUS (Pmode, base, offset);
3194 return orig;
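/* Rough expansion sketch (illustrative; the exact relocations depend on
   the target): for a global symbol under -fPIC the code above produces

        ldr     rT, .Lpool      @ GOT offset of the symbol
        ldr     rD, [rPIC, rT]  @ fetch the address from the GOT

   whereas a local symbol or label skips the GOT load and simply adds the
   PIC base register to the offset.  */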
3198 /* Find a spare low register to use during the prologue of a function. */
3200 static int
3201 thumb_find_work_register (unsigned long pushed_regs_mask)
3203 int reg;
3205 /* Check the argument registers first as these are call-used. The
3206 register allocation order means that sometimes r3 might be used
3207 but earlier argument registers might not, so check them all. */
3208 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3209 if (!regs_ever_live[reg])
3210 return reg;
3212 /* Before going on to check the call-saved registers we can try a couple
3213 more ways of deducing that r3 is available. The first is when we are
3214 pushing anonymous arguments onto the stack and we have fewer than 4
3215 registers' worth of fixed arguments (*). In this case r3 will be part of
3216 the variable argument list and so we can be sure that it will be
3217 pushed right at the start of the function. Hence it will be available
3218 for the rest of the prologue.
3219 (*): i.e. current_function_pretend_args_size is greater than 0. */
3220 if (cfun->machine->uses_anonymous_args
3221 && current_function_pretend_args_size > 0)
3222 return LAST_ARG_REGNUM;
3224 /* The other case is when we have fixed arguments but fewer than 4 registers'
3225 worth. In this case r3 might be used in the body of the function, but
3226 it is not being used to convey an argument into the function. In theory
3227 we could just check current_function_args_size to see how many bytes are
3228 being passed in argument registers, but it seems that it is unreliable.
3229 Sometimes it will have the value 0 when in fact arguments are being
3230 passed. (See testcase execute/20021111-1.c for an example). So we also
3231 check the args_info.nregs field as well. The problem with this field is
3232 that it makes no allowances for arguments that are passed to the
3233 function but which are not used. Hence we could miss an opportunity
3234 when a function has an unused argument in r3. But it is better to be
3235 safe than to be sorry. */
3236 if (! cfun->machine->uses_anonymous_args
3237 && current_function_args_size >= 0
3238 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3239 && cfun->args_info.nregs < 4)
3240 return LAST_ARG_REGNUM;
3242 /* Otherwise look for a call-saved register that is going to be pushed. */
3243 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3244 if (pushed_regs_mask & (1 << reg))
3245 return reg;
3247 /* Something went wrong - thumb_compute_save_reg_mask()
3248 should have arranged for a suitable register to be pushed. */
3249 gcc_unreachable ();
3253 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3254 low register. */
3256 void
3257 arm_load_pic_register (unsigned int scratch)
3259 #ifndef AOF_ASSEMBLER
3260 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3261 rtx global_offset_table;
3263 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3264 return;
3266 gcc_assert (flag_pic);
3268 l1 = gen_label_rtx ();
3270 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3271 /* On the ARM the PC register contains 'dot + 8' at the time of the
3272 addition, on the Thumb it is 'dot + 4'. */
3273 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3274 if (GOT_PCREL)
3275 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3276 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3277 else
3278 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3280 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3282 if (TARGET_ARM)
3284 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3285 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3287 else
3289 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3291 /* We will have pushed the pic register, so should always be
3292 able to find a work register. */
3293 pic_tmp = gen_rtx_REG (SImode, scratch);
3294 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3295 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3297 else
3298 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3299 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3302 /* Need to emit this whether or not we obey regdecls,
3303 since setjmp/longjmp can cause life info to screw up. */
3304 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3305 #endif /* AOF_ASSEMBLER */
3309 /* Return nonzero if X is valid as an ARM state addressing register. */
3310 static int
3311 arm_address_register_rtx_p (rtx x, int strict_p)
3313 int regno;
3315 if (GET_CODE (x) != REG)
3316 return 0;
3318 regno = REGNO (x);
3320 if (strict_p)
3321 return ARM_REGNO_OK_FOR_BASE_P (regno);
3323 return (regno <= LAST_ARM_REGNUM
3324 || regno >= FIRST_PSEUDO_REGISTER
3325 || regno == FRAME_POINTER_REGNUM
3326 || regno == ARG_POINTER_REGNUM);
3329 /* Return nonzero if X is a valid ARM state address operand. */
3331 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3332 int strict_p)
3334 bool use_ldrd;
3335 enum rtx_code code = GET_CODE (x);
3337 if (arm_address_register_rtx_p (x, strict_p))
3338 return 1;
3340 use_ldrd = (TARGET_LDRD
3341 && (mode == DImode
3342 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3344 if (code == POST_INC || code == PRE_DEC
3345 || ((code == PRE_INC || code == POST_DEC)
3346 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3347 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3349 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3350 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3351 && GET_CODE (XEXP (x, 1)) == PLUS
3352 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3354 rtx addend = XEXP (XEXP (x, 1), 1);
3356 /* Don't allow ldrd post increment by register because it's hard
3357 to fixup invalid register choices. */
3358 if (use_ldrd
3359 && GET_CODE (x) == POST_MODIFY
3360 && GET_CODE (addend) == REG)
3361 return 0;
3363 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3364 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3367 /* After reload constants split into minipools will have addresses
3368 from a LABEL_REF. */
3369 else if (reload_completed
3370 && (code == LABEL_REF
3371 || (code == CONST
3372 && GET_CODE (XEXP (x, 0)) == PLUS
3373 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3374 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3375 return 1;
3377 else if (mode == TImode)
3378 return 0;
3380 else if (code == PLUS)
3382 rtx xop0 = XEXP (x, 0);
3383 rtx xop1 = XEXP (x, 1);
3385 return ((arm_address_register_rtx_p (xop0, strict_p)
3386 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3387 || (arm_address_register_rtx_p (xop1, strict_p)
3388 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3391 #if 0
3392 /* Reload currently can't handle MINUS, so disable this for now */
3393 else if (GET_CODE (x) == MINUS)
3395 rtx xop0 = XEXP (x, 0);
3396 rtx xop1 = XEXP (x, 1);
3398 return (arm_address_register_rtx_p (xop0, strict_p)
3399 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3401 #endif
3403 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3404 && code == SYMBOL_REF
3405 && CONSTANT_POOL_ADDRESS_P (x)
3406 && ! (flag_pic
3407 && symbol_mentioned_p (get_pool_constant (x))))
3408 return 1;
3410 return 0;
3413 /* Return nonzero if INDEX is valid for an address index operand in
3414 ARM state. */
3415 static int
3416 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3417 int strict_p)
3419 HOST_WIDE_INT range;
3420 enum rtx_code code = GET_CODE (index);
3422 /* Standard coprocessor addressing modes. */
3423 if (TARGET_HARD_FLOAT
3424 && (TARGET_FPA || TARGET_MAVERICK)
3425 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3426 || (TARGET_MAVERICK && mode == DImode)))
3427 return (code == CONST_INT && INTVAL (index) < 1024
3428 && INTVAL (index) > -1024
3429 && (INTVAL (index) & 3) == 0);
3431 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3432 return (code == CONST_INT
3433 && INTVAL (index) < 1024
3434 && INTVAL (index) > -1024
3435 && (INTVAL (index) & 3) == 0);
3437 if (arm_address_register_rtx_p (index, strict_p)
3438 && (GET_MODE_SIZE (mode) <= 4))
3439 return 1;
3441 if (mode == DImode || mode == DFmode)
3443 if (code == CONST_INT)
3445 HOST_WIDE_INT val = INTVAL (index);
3447 if (TARGET_LDRD)
3448 return val > -256 && val < 256;
3449 else
3450 return val > -4096 && val < 4092;
3453 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3456 if (GET_MODE_SIZE (mode) <= 4
3457 && ! (arm_arch4
3458 && (mode == HImode
3459 || (mode == QImode && outer == SIGN_EXTEND))))
3461 if (code == MULT)
3463 rtx xiop0 = XEXP (index, 0);
3464 rtx xiop1 = XEXP (index, 1);
3466 return ((arm_address_register_rtx_p (xiop0, strict_p)
3467 && power_of_two_operand (xiop1, SImode))
3468 || (arm_address_register_rtx_p (xiop1, strict_p)
3469 && power_of_two_operand (xiop0, SImode)));
3471 else if (code == LSHIFTRT || code == ASHIFTRT
3472 || code == ASHIFT || code == ROTATERT)
3474 rtx op = XEXP (index, 1);
3476 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3477 && GET_CODE (op) == CONST_INT
3478 && INTVAL (op) > 0
3479 && INTVAL (op) <= 31);
3483 /* For ARM v4 we may be doing a sign-extend operation during the
3484 load. */
3485 if (arm_arch4)
3487 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3488 range = 256;
3489 else
3490 range = 4096;
3492 else
3493 range = (mode == HImode) ? 4095 : 4096;
3495 return (code == CONST_INT
3496 && INTVAL (index) < range
3497 && INTVAL (index) > -range);
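/* Examples of indices accepted above (illustrative): [rN, rM, lsl #2] for
   a word load, or [rN, #-4000] for a plain ldr/str.  On ARMv4 and later,
   ldrh/ldrsh/ldrsb only take an 8-bit immediate offset and no shifted
   register, hence the tighter limits for HImode and sign-extended
   QImode.  */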
3500 /* Return nonzero if X is valid as a Thumb state base register. */
3501 static int
3502 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3504 int regno;
3506 if (GET_CODE (x) != REG)
3507 return 0;
3509 regno = REGNO (x);
3511 if (strict_p)
3512 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3514 return (regno <= LAST_LO_REGNUM
3515 || regno > LAST_VIRTUAL_REGISTER
3516 || regno == FRAME_POINTER_REGNUM
3517 || (GET_MODE_SIZE (mode) >= 4
3518 && (regno == STACK_POINTER_REGNUM
3519 || regno >= FIRST_PSEUDO_REGISTER
3520 || x == hard_frame_pointer_rtx
3521 || x == arg_pointer_rtx)));
3524 /* Return nonzero if x is a legitimate index register. This is the case
3525 for any base register that can access a QImode object. */
3526 inline static int
3527 thumb_index_register_rtx_p (rtx x, int strict_p)
3529 return thumb_base_register_rtx_p (x, QImode, strict_p);
3532 /* Return nonzero if x is a legitimate Thumb-state address.
3534 The AP may be eliminated to either the SP or the FP, so we use the
3535 least common denominator, e.g. SImode, and offsets from 0 to 64.
3537 ??? Verify whether the above is the right approach.
3539 ??? Also, the FP may be eliminated to the SP, so perhaps that
3540 needs special handling also.
3542 ??? Look at how the mips16 port solves this problem. It probably uses
3543 better ways to solve some of these problems.
3545 Although it is not incorrect, we don't accept QImode and HImode
3546 addresses based on the frame pointer or arg pointer until the
3547 reload pass starts. This is so that eliminating such addresses
3548 into stack based ones won't produce impossible code. */
3550 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3552 /* ??? Not clear if this is right. Experiment. */
3553 if (GET_MODE_SIZE (mode) < 4
3554 && !(reload_in_progress || reload_completed)
3555 && (reg_mentioned_p (frame_pointer_rtx, x)
3556 || reg_mentioned_p (arg_pointer_rtx, x)
3557 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3558 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3559 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3560 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3561 return 0;
3563 /* Accept any base register. SP only in SImode or larger. */
3564 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3565 return 1;
3567 /* This is PC relative data before arm_reorg runs. */
3568 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3569 && GET_CODE (x) == SYMBOL_REF
3570 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3571 return 1;
3573 /* This is PC relative data after arm_reorg runs. */
3574 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3575 && (GET_CODE (x) == LABEL_REF
3576 || (GET_CODE (x) == CONST
3577 && GET_CODE (XEXP (x, 0)) == PLUS
3578 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3579 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3580 return 1;
3582 /* Post-inc indexing only supported for SImode and larger. */
3583 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3584 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3585 return 1;
3587 else if (GET_CODE (x) == PLUS)
3589 /* REG+REG address can be any two index registers. */
3590 /* We disallow FRAME+REG addressing since we know that FRAME
3591 will be replaced with STACK, and SP relative addressing only
3592 permits SP+OFFSET. */
3593 if (GET_MODE_SIZE (mode) <= 4
3594 && XEXP (x, 0) != frame_pointer_rtx
3595 && XEXP (x, 1) != frame_pointer_rtx
3596 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3597 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3598 return 1;
3600 /* REG+const has 5-7 bit offset for non-SP registers. */
3601 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3602 || XEXP (x, 0) == arg_pointer_rtx)
3603 && GET_CODE (XEXP (x, 1)) == CONST_INT
3604 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3605 return 1;
3607 /* REG+const has 10 bit offset for SP, but only SImode and
3608 larger is supported. */
3609 /* ??? Should probably check for DI/DFmode overflow here
3610 just like GO_IF_LEGITIMATE_OFFSET does. */
3611 else if (GET_CODE (XEXP (x, 0)) == REG
3612 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3613 && GET_MODE_SIZE (mode) >= 4
3614 && GET_CODE (XEXP (x, 1)) == CONST_INT
3615 && INTVAL (XEXP (x, 1)) >= 0
3616 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3617 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3618 return 1;
3620 else if (GET_CODE (XEXP (x, 0)) == REG
3621 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3622 && GET_MODE_SIZE (mode) >= 4
3623 && GET_CODE (XEXP (x, 1)) == CONST_INT
3624 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3625 return 1;
3628 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3629 && GET_MODE_SIZE (mode) == 4
3630 && GET_CODE (x) == SYMBOL_REF
3631 && CONSTANT_POOL_ADDRESS_P (x)
3632 && !(flag_pic
3633 && symbol_mentioned_p (get_pool_constant (x))))
3634 return 1;
3636 return 0;
3639 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3640 instruction of mode MODE. */
3642 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3644 switch (GET_MODE_SIZE (mode))
3646 case 1:
3647 return val >= 0 && val < 32;
3649 case 2:
3650 return val >= 0 && val < 64 && (val & 1) == 0;
3652 default:
3653 return (val >= 0
3654 && (val + GET_MODE_SIZE (mode)) <= 128
3655 && (val & 3) == 0);
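/* In concrete terms (illustrative): Thumb ldrb/strb accept [rN, #0..31],
   ldrh/strh accept [rN, #0..62] in steps of 2, and ldr/str accept
   [rN, #0..124] in steps of 4, which is exactly what the ranges above
   encode.  */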
3659 /* Try machine-dependent ways of modifying an illegitimate address
3660 to be legitimate. If we find one, return the new, valid address. */
3662 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3664 if (GET_CODE (x) == PLUS)
3666 rtx xop0 = XEXP (x, 0);
3667 rtx xop1 = XEXP (x, 1);
3669 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3670 xop0 = force_reg (SImode, xop0);
3672 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3673 xop1 = force_reg (SImode, xop1);
3675 if (ARM_BASE_REGISTER_RTX_P (xop0)
3676 && GET_CODE (xop1) == CONST_INT)
3678 HOST_WIDE_INT n, low_n;
3679 rtx base_reg, val;
3680 n = INTVAL (xop1);
3682 /* VFP addressing modes actually allow greater offsets, but for
3683 now we just stick with the lowest common denominator. */
3684 if (mode == DImode
3685 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3687 low_n = n & 0x0f;
3688 n &= ~0x0f;
3689 if (low_n > 4)
3691 n += 16;
3692 low_n -= 16;
3695 else
3697 low_n = ((mode) == TImode ? 0
3698 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3699 n -= low_n;
3702 base_reg = gen_reg_rtx (SImode);
3703 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3704 GEN_INT (n)), NULL_RTX);
3705 emit_move_insn (base_reg, val);
3706 x = (low_n == 0 ? base_reg
3707 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3709 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3710 x = gen_rtx_PLUS (SImode, xop0, xop1);
3713 /* XXX We don't allow MINUS any more -- see comment in
3714 arm_legitimate_address_p (). */
3715 else if (GET_CODE (x) == MINUS)
3717 rtx xop0 = XEXP (x, 0);
3718 rtx xop1 = XEXP (x, 1);
3720 if (CONSTANT_P (xop0))
3721 xop0 = force_reg (SImode, xop0);
3723 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3724 xop1 = force_reg (SImode, xop1);
3726 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3727 x = gen_rtx_MINUS (SImode, xop0, xop1);
3730 if (flag_pic)
3732 /* We need to find and carefully transform any SYMBOL and LABEL
3733 references; so go back to the original address expression. */
3734 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3736 if (new_x != orig_x)
3737 x = new_x;
3740 return x;
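/* Sketch of the intent (illustrative): given (plus rB (const_int 0x1234))
   in SImode, the CONST_INT branch above is meant to split the offset so
   that 0x1000 is folded into a new base register (a single ADD, since
   0x1000 is a valid immediate) and the remaining #0x234 stays in the
   address, giving [rT, #0x234].  */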
3744 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3745 to be legitimate. If we find one, return the new, valid address. */
3747 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3749 if (GET_CODE (x) == PLUS
3750 && GET_CODE (XEXP (x, 1)) == CONST_INT
3751 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3752 || INTVAL (XEXP (x, 1)) < 0))
3754 rtx xop0 = XEXP (x, 0);
3755 rtx xop1 = XEXP (x, 1);
3756 HOST_WIDE_INT offset = INTVAL (xop1);
3758 /* Try and fold the offset into a biasing of the base register and
3759 then offsetting that. Don't do this when optimizing for space
3760 since it can cause too many CSEs. */
3761 if (optimize_size && offset >= 0
3762 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3764 HOST_WIDE_INT delta;
3766 if (offset >= 256)
3767 delta = offset - (256 - GET_MODE_SIZE (mode));
3768 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3769 delta = 31 * GET_MODE_SIZE (mode);
3770 else
3771 delta = offset & (~31 * GET_MODE_SIZE (mode));
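/* Illustrative example: if this block is entered for SImode with
   offset == 300, the first branch above gives delta == 48, so the address
   is rebuilt below as (base + 252) + 48, where 48 is a valid Thumb word
   offset.  */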
3773 xop0 = force_operand (plus_constant (xop0, offset - delta),
3774 NULL_RTX);
3775 x = plus_constant (xop0, delta);
3777 else if (offset < 0 && offset > -256)
3778 /* Small negative offsets are best done with a subtract before the
3779 dereference; forcing these into a register normally takes two
3780 instructions. */
3781 x = force_operand (x, NULL_RTX);
3782 else
3784 /* For the remaining cases, force the constant into a register. */
3785 xop1 = force_reg (SImode, xop1);
3786 x = gen_rtx_PLUS (SImode, xop0, xop1);
3789 else if (GET_CODE (x) == PLUS
3790 && s_register_operand (XEXP (x, 1), SImode)
3791 && !s_register_operand (XEXP (x, 0), SImode))
3793 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3795 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3798 if (flag_pic)
3800 /* We need to find and carefully transform any SYMBOL and LABEL
3801 references; so go back to the original address expression. */
3802 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3804 if (new_x != orig_x)
3805 x = new_x;
3808 return x;
3813 #define REG_OR_SUBREG_REG(X) \
3814 (GET_CODE (X) == REG \
3815 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3817 #define REG_OR_SUBREG_RTX(X) \
3818 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3820 #ifndef COSTS_N_INSNS
3821 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3822 #endif
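/* With this fallback definition, COSTS_N_INSNS (1) == 2,
   COSTS_N_INSNS (2) == 6 and COSTS_N_INSNS (3) == 10.  */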
3823 static inline int
3824 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3826 enum machine_mode mode = GET_MODE (x);
3828 switch (code)
3830 case ASHIFT:
3831 case ASHIFTRT:
3832 case LSHIFTRT:
3833 case ROTATERT:
3834 case PLUS:
3835 case MINUS:
3836 case COMPARE:
3837 case NEG:
3838 case NOT:
3839 return COSTS_N_INSNS (1);
3841 case MULT:
3842 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3844 int cycles = 0;
3845 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3847 while (i)
3849 i >>= 2;
3850 cycles++;
3852 return COSTS_N_INSNS (2) + cycles;
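/* Illustrative example: a constant multiplier of 100 shifts to zero in four
   2-bit steps (100 -> 25 -> 6 -> 1 -> 0), so the cost returned above is
   COSTS_N_INSNS (2) + 4.  */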
3854 return COSTS_N_INSNS (1) + 16;
3856 case SET:
3857 return (COSTS_N_INSNS (1)
3858 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3859 + (GET_CODE (SET_DEST (x)) == MEM)));
3861 case CONST_INT:
3862 if (outer == SET)
3864 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3865 return 0;
3866 if (thumb_shiftable_const (INTVAL (x)))
3867 return COSTS_N_INSNS (2);
3868 return COSTS_N_INSNS (3);
3870 else if ((outer == PLUS || outer == COMPARE)
3871 && INTVAL (x) < 256 && INTVAL (x) > -256)
3872 return 0;
3873 else if (outer == AND
3874 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3875 return COSTS_N_INSNS (1);
3876 else if (outer == ASHIFT || outer == ASHIFTRT
3877 || outer == LSHIFTRT)
3878 return 0;
3879 return COSTS_N_INSNS (2);
3881 case CONST:
3882 case CONST_DOUBLE:
3883 case LABEL_REF:
3884 case SYMBOL_REF:
3885 return COSTS_N_INSNS (3);
3887 case UDIV:
3888 case UMOD:
3889 case DIV:
3890 case MOD:
3891 return 100;
3893 case TRUNCATE:
3894 return 99;
3896 case AND:
3897 case XOR:
3898 case IOR:
3899 /* XXX guess. */
3900 return 8;
3902 case MEM:
3903 /* XXX another guess. */
3904 /* Memory costs quite a lot for the first word, but subsequent words
3905 load at the equivalent of a single insn each. */
3906 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3907 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3908 ? 4 : 0));
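/* Worked example of the formula above: an SImode load costs 10, a DImode
   load costs 14, and either costs 4 more when it is a constant-pool
   reference.  */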
3910 case IF_THEN_ELSE:
3911 /* XXX a guess. */
3912 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3913 return 14;
3914 return 2;
3916 case ZERO_EXTEND:
3917 /* XXX still guessing. */
3918 switch (GET_MODE (XEXP (x, 0)))
3920 case QImode:
3921 return (1 + (mode == DImode ? 4 : 0)
3922 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3924 case HImode:
3925 return (4 + (mode == DImode ? 4 : 0)
3926 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3928 case SImode:
3929 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3931 default:
3932 return 99;
3935 default:
3936 return 99;
3941 /* Worker routine for arm_rtx_costs. */
3942 static inline int
3943 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3945 enum machine_mode mode = GET_MODE (x);
3946 enum rtx_code subcode;
3947 int extra_cost;
3949 switch (code)
3951 case MEM:
3952 /* Memory costs quite a lot for the first word, but subsequent words
3953 load at the equivalent of a single insn each. */
3954 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3955 + (GET_CODE (x) == SYMBOL_REF
3956 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3958 case DIV:
3959 case MOD:
3960 case UDIV:
3961 case UMOD:
3962 return optimize_size ? COSTS_N_INSNS (2) : 100;
3964 case ROTATE:
3965 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3966 return 4;
3967 /* Fall through */
3968 case ROTATERT:
3969 if (mode != SImode)
3970 return 8;
3971 /* Fall through */
3972 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3973 if (mode == DImode)
3974 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3975 + ((GET_CODE (XEXP (x, 0)) == REG
3976 || (GET_CODE (XEXP (x, 0)) == SUBREG
3977 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3978 ? 0 : 8));
3979 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3980 || (GET_CODE (XEXP (x, 0)) == SUBREG
3981 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3982 ? 0 : 4)
3983 + ((GET_CODE (XEXP (x, 1)) == REG
3984 || (GET_CODE (XEXP (x, 1)) == SUBREG
3985 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3986 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3987 ? 0 : 4));
3989 case MINUS:
3990 if (mode == DImode)
3991 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3992 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3993 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3994 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3995 ? 0 : 8));
3997 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3998 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3999 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4000 && arm_const_double_rtx (XEXP (x, 1))))
4001 ? 0 : 8)
4002 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4003 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4004 && arm_const_double_rtx (XEXP (x, 0))))
4005 ? 0 : 8));
4007 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4008 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4009 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4010 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4011 || subcode == ASHIFTRT || subcode == LSHIFTRT
4012 || subcode == ROTATE || subcode == ROTATERT
4013 || (subcode == MULT
4014 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4015 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4016 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4017 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4018 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4019 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4020 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4021 return 1;
4022 /* Fall through */
4024 case PLUS:
4025 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4026 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4027 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4028 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4029 && arm_const_double_rtx (XEXP (x, 1))))
4030 ? 0 : 8));
4032 /* Fall through */
4033 case AND: case XOR: case IOR:
4034 extra_cost = 0;
4036 /* Normally the frame registers will be spilt into reg+const during
4037 reload, so it is a bad idea to combine them with other instructions,
4038 since then they might not be moved outside of loops. As a compromise
4039 we allow integration with ops that have a constant as their second
4040 operand. */
4041 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4042 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4043 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4044 || (REG_OR_SUBREG_REG (XEXP (x, 1))
4045 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
4046 extra_cost = 4;
4048 if (mode == DImode)
4049 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4050 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4051 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4052 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4053 ? 0 : 8));
4055 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4056 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4057 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4058 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4059 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4060 ? 0 : 4));
4062 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4063 return (1 + extra_cost
4064 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4065 || subcode == LSHIFTRT || subcode == ASHIFTRT
4066 || subcode == ROTATE || subcode == ROTATERT
4067 || (subcode == MULT
4068 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4069 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4070 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4071 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4072 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4073 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4074 ? 0 : 4));
4076 return 8;
4078 case MULT:
4079 /* This should have been handled by the CPU specific routines. */
4080 gcc_unreachable ();
4082 case TRUNCATE:
4083 if (arm_arch3m && mode == SImode
4084 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4085 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4086 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4087 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4088 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4089 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4090 return 8;
4091 return 99;
4093 case NEG:
4094 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4095 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4096 /* Fall through */
4097 case NOT:
4098 if (mode == DImode)
4099 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4101 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4103 case IF_THEN_ELSE:
4104 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4105 return 14;
4106 return 2;
4108 case COMPARE:
4109 return 1;
4111 case ABS:
4112 return 4 + (mode == DImode ? 4 : 0);
4114 case SIGN_EXTEND:
4115 if (GET_MODE (XEXP (x, 0)) == QImode)
4116 return (4 + (mode == DImode ? 4 : 0)
4117 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4118 /* Fall through */
4119 case ZERO_EXTEND:
4120 switch (GET_MODE (XEXP (x, 0)))
4122 case QImode:
4123 return (1 + (mode == DImode ? 4 : 0)
4124 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4126 case HImode:
4127 return (4 + (mode == DImode ? 4 : 0)
4128 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4130 case SImode:
4131 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4133 case V8QImode:
4134 case V4HImode:
4135 case V2SImode:
4136 case V4QImode:
4137 case V2HImode:
4138 return 1;
4140 default:
4141 gcc_unreachable ();
4143 gcc_unreachable ();
4145 case CONST_INT:
4146 if (const_ok_for_arm (INTVAL (x)))
4147 return outer == SET ? 2 : -1;
4148 else if (outer == AND
4149 && const_ok_for_arm (~INTVAL (x)))
4150 return -1;
4151 else if ((outer == COMPARE
4152 || outer == PLUS || outer == MINUS)
4153 && const_ok_for_arm (-INTVAL (x)))
4154 return -1;
4155 else
4156 return 5;
4158 case CONST:
4159 case LABEL_REF:
4160 case SYMBOL_REF:
4161 return 6;
4163 case CONST_DOUBLE:
4164 if (arm_const_double_rtx (x))
4165 return outer == SET ? 2 : -1;
4166 else if ((outer == COMPARE || outer == PLUS)
4167 && neg_const_double_rtx_ok_for_fpa (x))
4168 return -1;
4169 return 7;
4171 default:
4172 return 99;
4176 /* RTX costs when optimizing for size. */
4177 static bool
4178 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4180 enum machine_mode mode = GET_MODE (x);
4182 if (TARGET_THUMB)
4184 /* XXX TBD. For now, use the standard costs. */
4185 *total = thumb_rtx_costs (x, code, outer_code);
4186 return true;
4189 switch (code)
4191 case MEM:
4192 /* A memory access costs 1 insn if the mode is small, or the address is
4193 a single register, otherwise it costs one insn per word. */
4194 if (REG_P (XEXP (x, 0)))
4195 *total = COSTS_N_INSNS (1);
4196 else
4197 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4198 return true;
4200 case DIV:
4201 case MOD:
4202 case UDIV:
4203 case UMOD:
4204 /* Needs a libcall, so it costs about this. */
4205 *total = COSTS_N_INSNS (2);
4206 return false;
4208 case ROTATE:
4209 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4211 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4212 return true;
4214 /* Fall through */
4215 case ROTATERT:
4216 case ASHIFT:
4217 case LSHIFTRT:
4218 case ASHIFTRT:
4219 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4221 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4222 return true;
4224 else if (mode == SImode)
4226 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4227 /* Slightly disparage register shifts, but not by much. */
4228 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4229 *total += 1 + rtx_cost (XEXP (x, 1), code);
4230 return true;
4233 /* Needs a libcall. */
4234 *total = COSTS_N_INSNS (2);
4235 return false;
4237 case MINUS:
4238 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4240 *total = COSTS_N_INSNS (1);
4241 return false;
4244 if (mode == SImode)
4246 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4247 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4249 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4250 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4251 || subcode1 == ROTATE || subcode1 == ROTATERT
4252 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4253 || subcode1 == ASHIFTRT)
4255 /* It's just the cost of the two operands. */
4256 *total = 0;
4257 return false;
4260 *total = COSTS_N_INSNS (1);
4261 return false;
4264 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4265 return false;
4267 case PLUS:
4268 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4270 *total = COSTS_N_INSNS (1);
4271 return false;
4274 /* Fall through */
4275 case AND: case XOR: case IOR:
4276 if (mode == SImode)
4278 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4280 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4281 || subcode == LSHIFTRT || subcode == ASHIFTRT
4282 || (code == AND && subcode == NOT))
4284 /* It's just the cost of the two operands. */
4285 *total = 0;
4286 return false;
4290 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4291 return false;
4293 case MULT:
4294 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4295 return false;
4297 case NEG:
4298 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4299 *total = COSTS_N_INSNS (1);
4300 /* Fall through */
4301 case NOT:
4302 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4304 return false;
4306 case IF_THEN_ELSE:
4307 *total = 0;
4308 return false;
4310 case COMPARE:
4311 if (cc_register (XEXP (x, 0), VOIDmode))
4312 *total = 0;
4313 else
4314 *total = COSTS_N_INSNS (1);
4315 return false;
4317 case ABS:
4318 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4319 *total = COSTS_N_INSNS (1);
4320 else
4321 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4322 return false;
4324 case SIGN_EXTEND:
4325 *total = 0;
4326 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4328 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4329 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4331 if (mode == DImode)
4332 *total += COSTS_N_INSNS (1);
4333 return false;
4335 case ZERO_EXTEND:
4336 *total = 0;
4337 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4339 switch (GET_MODE (XEXP (x, 0)))
4341 case QImode:
4342 *total += COSTS_N_INSNS (1);
4343 break;
4345 case HImode:
4346 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4348 case SImode:
4349 break;
4351 default:
4352 *total += COSTS_N_INSNS (2);
4356 if (mode == DImode)
4357 *total += COSTS_N_INSNS (1);
4359 return false;
4361 case CONST_INT:
4362 if (const_ok_for_arm (INTVAL (x)))
4363 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4364 else if (const_ok_for_arm (~INTVAL (x)))
4365 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4366 else if (const_ok_for_arm (-INTVAL (x)))
4368 if (outer_code == COMPARE || outer_code == PLUS
4369 || outer_code == MINUS)
4370 *total = 0;
4371 else
4372 *total = COSTS_N_INSNS (1);
4374 else
4375 *total = COSTS_N_INSNS (2);
4376 return true;
4378 case CONST:
4379 case LABEL_REF:
4380 case SYMBOL_REF:
4381 *total = COSTS_N_INSNS (2);
4382 return true;
4384 case CONST_DOUBLE:
4385 *total = COSTS_N_INSNS (4);
4386 return true;
4388 default:
4389 if (mode != VOIDmode)
4390 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4391 else
4392 *total = COSTS_N_INSNS (4); /* Who knows? */
4393 return false;
4397 /* RTX costs for cores with a slow MUL implementation. */
4399 static bool
4400 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4402 enum machine_mode mode = GET_MODE (x);
4404 if (TARGET_THUMB)
4406 *total = thumb_rtx_costs (x, code, outer_code);
4407 return true;
4410 switch (code)
4412 case MULT:
4413 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4414 || mode == DImode)
4416 *total = 30;
4417 return true;
4420 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4422 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4423 & (unsigned HOST_WIDE_INT) 0xffffffff);
4424 int cost, const_ok = const_ok_for_arm (i);
4425 int j, booth_unit_size;
4427 /* Tune as appropriate. */
4428 cost = const_ok ? 4 : 8;
4429 booth_unit_size = 2;
4430 for (j = 0; i && j < 32; j += booth_unit_size)
4432 i >>= booth_unit_size;
4433 cost += 2;
4436 *total = cost;
4437 return true;
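/* Illustrative example: a multiplier of 100 is const_ok_for_arm and shifts
   to zero in four 2-bit Booth steps, so the loop above yields
   4 + 4 * 2 == 12.  */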
4440 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4441 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4442 return true;
4444 default:
4445 *total = arm_rtx_costs_1 (x, code, outer_code);
4446 return true;
4451 /* RTX cost for cores with a fast multiply unit (M variants). */
4453 static bool
4454 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4456 enum machine_mode mode = GET_MODE (x);
4458 if (TARGET_THUMB)
4460 *total = thumb_rtx_costs (x, code, outer_code);
4461 return true;
4464 switch (code)
4466 case MULT:
4467 /* There is no point basing this on the tuning, since it is always the
4468 fast variant if it exists at all. */
4469 if (mode == DImode
4470 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4471 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4472 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4474 *total = 8;
4475 return true;
4479 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4480 || mode == DImode)
4482 *total = 30;
4483 return true;
4486 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4488 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4489 & (unsigned HOST_WIDE_INT) 0xffffffff);
4490 int cost, const_ok = const_ok_for_arm (i);
4491 int j, booth_unit_size;
4493 /* Tune as appropriate. */
4494 cost = const_ok ? 4 : 8;
4495 booth_unit_size = 8;
4496 for (j = 0; i && j < 32; j += booth_unit_size)
4498 i >>= booth_unit_size;
4499 cost += 2;
4502 *total = cost;
4503 return true;
4506 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4507 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4508 return true;
4510 default:
4511 *total = arm_rtx_costs_1 (x, code, outer_code);
4512 return true;
4517 /* RTX cost for XScale CPUs. */
4519 static bool
4520 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4522 enum machine_mode mode = GET_MODE (x);
4524 if (TARGET_THUMB)
4526 *total = thumb_rtx_costs (x, code, outer_code);
4527 return true;
4530 switch (code)
4532 case MULT:
4533 /* There is no point basing this on the tuning, since it is always the
4534 fast variant if it exists at all. */
4535 if (mode == DImode
4536 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4537 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4538 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4540 *total = 8;
4541 return true;
4545 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4546 || mode == DImode)
4548 *total = 30;
4549 return true;
4552 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4554 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4555 & (unsigned HOST_WIDE_INT) 0xffffffff);
4556 int cost, const_ok = const_ok_for_arm (i);
4557 unsigned HOST_WIDE_INT masked_const;
4559 /* The cost will be related to two insns.
4560 First a load of the constant (MOV or LDR), then a multiply. */
4561 cost = 2;
4562 if (! const_ok)
4563 cost += 1; /* LDR is probably more expensive because
4564 of longer result latency. */
4565 masked_const = i & 0xffff8000;
4566 if (masked_const != 0 && masked_const != 0xffff8000)
4568 masked_const = i & 0xf8000000;
4569 if (masked_const == 0 || masked_const == 0xf8000000)
4570 cost += 1;
4571 else
4572 cost += 2;
4574 *total = cost;
4575 return true;
4578 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4579 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4580 return true;
4582 case COMPARE:
4583 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4584 will stall until the multiplication is complete. */
4585 if (GET_CODE (XEXP (x, 0)) == MULT)
4586 *total = 4 + rtx_cost (XEXP (x, 0), code);
4587 else
4588 *total = arm_rtx_costs_1 (x, code, outer_code);
4589 return true;
4591 default:
4592 *total = arm_rtx_costs_1 (x, code, outer_code);
4593 return true;
4598 /* RTX costs for 9e (and later) cores. */
4600 static bool
4601 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4603 enum machine_mode mode = GET_MODE (x);
4604 int nonreg_cost;
4605 int cost;
4607 if (TARGET_THUMB)
4609 switch (code)
4611 case MULT:
4612 *total = COSTS_N_INSNS (3);
4613 return true;
4615 default:
4616 *total = thumb_rtx_costs (x, code, outer_code);
4617 return true;
4621 switch (code)
4623 case MULT:
4624 /* There is no point basing this on the tuning, since it is always the
4625 fast variant if it exists at all. */
4626 if (mode == DImode
4627 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4628 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4629 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4631 *total = 3;
4632 return true;
4636 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4638 *total = 30;
4639 return true;
4641 if (mode == DImode)
4643 cost = 7;
4644 nonreg_cost = 8;
4646 else
4648 cost = 2;
4649 nonreg_cost = 4;
4653 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4654 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4655 return true;
4657 default:
4658 *total = arm_rtx_costs_1 (x, code, outer_code);
4659 return true;
4662 /* All address computations that can be done are free, but rtx cost returns
4663 the same for practically all of them. So we weight the different types
4664 of address here in the order (most pref first):
4665 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4666 static inline int
4667 arm_arm_address_cost (rtx x)
4669 enum rtx_code c = GET_CODE (x);
4671 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4672 return 0;
4673 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4674 return 10;
4676 if (c == PLUS || c == MINUS)
4678 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4679 return 2;
4681 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4682 return 3;
4684 return 4;
4687 return 6;
4690 static inline int
4691 arm_thumb_address_cost (rtx x)
4693 enum rtx_code c = GET_CODE (x);
4695 if (c == REG)
4696 return 1;
4697 if (c == PLUS
4698 && GET_CODE (XEXP (x, 0)) == REG
4699 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4700 return 1;
4702 return 2;
4705 static int
4706 arm_address_cost (rtx x)
4708 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4711 static int
4712 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4714 rtx i_pat, d_pat;
4716 /* Some true dependencies can have a higher cost depending
4717 on precisely how certain input operands are used. */
4718 if (arm_tune_xscale
4719 && REG_NOTE_KIND (link) == 0
4720 && recog_memoized (insn) >= 0
4721 && recog_memoized (dep) >= 0)
4723 int shift_opnum = get_attr_shift (insn);
4724 enum attr_type attr_type = get_attr_type (dep);
4726 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4727 operand for INSN. If we have a shifted input operand and the
4728 instruction we depend on is another ALU instruction, then we may
4729 have to account for an additional stall. */
4730 if (shift_opnum != 0
4731 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4733 rtx shifted_operand;
4734 int opno;
4736 /* Get the shifted operand. */
4737 extract_insn (insn);
4738 shifted_operand = recog_data.operand[shift_opnum];
4740 /* Iterate over all the operands in DEP. If we write an operand
4741 that overlaps with SHIFTED_OPERAND, then we have to increase the
4742 cost of this dependency. */
4743 extract_insn (dep);
4744 preprocess_constraints ();
4745 for (opno = 0; opno < recog_data.n_operands; opno++)
4747 /* We can ignore strict inputs. */
4748 if (recog_data.operand_type[opno] == OP_IN)
4749 continue;
4751 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4752 shifted_operand))
4753 return 2;
4758 /* XXX This is not strictly true for the FPA. */
4759 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4760 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4761 return 0;
4763 /* Call insns don't incur a stall, even if they follow a load. */
4764 if (REG_NOTE_KIND (link) == 0
4765 && GET_CODE (insn) == CALL_INSN)
4766 return 1;
4768 if ((i_pat = single_set (insn)) != NULL
4769 && GET_CODE (SET_SRC (i_pat)) == MEM
4770 && (d_pat = single_set (dep)) != NULL
4771 && GET_CODE (SET_DEST (d_pat)) == MEM)
4773 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4774 /* This is a load after a store; there is no conflict if the load reads
4775 from a cached area. Assume that loads from the stack and from the
4776 constant pool are cached, and that others will miss. This is a
4777 hack. */
4779 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4780 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4781 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4782 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4783 return 1;
4786 return cost;
4789 static int fp_consts_inited = 0;
4791 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4792 static const char * const strings_fp[8] =
4794 "0", "1", "2", "3",
4795 "4", "5", "0.5", "10"
4798 static REAL_VALUE_TYPE values_fp[8];
4800 static void
4801 init_fp_table (void)
4803 int i;
4804 REAL_VALUE_TYPE r;
4806 if (TARGET_VFP)
4807 fp_consts_inited = 1;
4808 else
4809 fp_consts_inited = 8;
4811 for (i = 0; i < fp_consts_inited; i++)
4813 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4814 values_fp[i] = r;
4818 /* Return TRUE if rtx X is a valid immediate FP constant. */
4820 arm_const_double_rtx (rtx x)
4822 REAL_VALUE_TYPE r;
4823 int i;
4825 if (!fp_consts_inited)
4826 init_fp_table ();
4828 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4829 if (REAL_VALUE_MINUS_ZERO (r))
4830 return 0;
4832 for (i = 0; i < fp_consts_inited; i++)
4833 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4834 return 1;
4836 return 0;
4839 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4841 neg_const_double_rtx_ok_for_fpa (rtx x)
4843 REAL_VALUE_TYPE r;
4844 int i;
4846 if (!fp_consts_inited)
4847 init_fp_table ();
4849 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4850 r = REAL_VALUE_NEGATE (r);
4851 if (REAL_VALUE_MINUS_ZERO (r))
4852 return 0;
4854 for (i = 0; i < 8; i++)
4855 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4856 return 1;
4858 return 0;
4861 /* Predicates for `match_operand' and `match_operator'. */
4863 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4865 cirrus_memory_offset (rtx op)
4867 /* Reject eliminable registers. */
4868 if (! (reload_in_progress || reload_completed)
4869 && ( reg_mentioned_p (frame_pointer_rtx, op)
4870 || reg_mentioned_p (arg_pointer_rtx, op)
4871 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4872 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4873 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4874 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4875 return 0;
4877 if (GET_CODE (op) == MEM)
4879 rtx ind;
4881 ind = XEXP (op, 0);
4883 /* Match: (mem (reg)). */
4884 if (GET_CODE (ind) == REG)
4885 return 1;
4887 /* Match:
4888 (mem (plus (reg)
4889 (const))). */
4890 if (GET_CODE (ind) == PLUS
4891 && GET_CODE (XEXP (ind, 0)) == REG
4892 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4893 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4894 return 1;
4897 return 0;
4900 /* Return TRUE if OP is a valid VFP memory address pattern.
4901 WB is true if writeback address modes are allowed. */
4904 arm_coproc_mem_operand (rtx op, bool wb)
4906 rtx ind;
4908 /* Reject eliminable registers. */
4909 if (! (reload_in_progress || reload_completed)
4910 && ( reg_mentioned_p (frame_pointer_rtx, op)
4911 || reg_mentioned_p (arg_pointer_rtx, op)
4912 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4913 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4914 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4915 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4916 return FALSE;
4918 /* Constants are converted into offsets from labels. */
4919 if (GET_CODE (op) != MEM)
4920 return FALSE;
4922 ind = XEXP (op, 0);
4924 if (reload_completed
4925 && (GET_CODE (ind) == LABEL_REF
4926 || (GET_CODE (ind) == CONST
4927 && GET_CODE (XEXP (ind, 0)) == PLUS
4928 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4929 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4930 return TRUE;
4932 /* Match: (mem (reg)). */
4933 if (GET_CODE (ind) == REG)
4934 return arm_address_register_rtx_p (ind, 0);
4936 /* Autoincrement addressing modes. */
4937 if (wb
4938 && (GET_CODE (ind) == PRE_INC
4939 || GET_CODE (ind) == POST_INC
4940 || GET_CODE (ind) == PRE_DEC
4941 || GET_CODE (ind) == POST_DEC))
4942 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4944 if (wb
4945 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4946 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4947 && GET_CODE (XEXP (ind, 1)) == PLUS
4948 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4949 ind = XEXP (ind, 1);
4951 /* Match:
4952 (plus (reg)
4953 (const)). */
4954 if (GET_CODE (ind) == PLUS
4955 && GET_CODE (XEXP (ind, 0)) == REG
4956 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4957 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4958 && INTVAL (XEXP (ind, 1)) > -1024
4959 && INTVAL (XEXP (ind, 1)) < 1024
4960 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4961 return TRUE;
4963 return FALSE;
4966 /* Return true if X is a register that will be eliminated later on. */
4968 arm_eliminable_register (rtx x)
4970 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4971 || REGNO (x) == ARG_POINTER_REGNUM
4972 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4973 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4976 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4977 VFP registers. Otherwise return NO_REGS. */
4979 enum reg_class
4980 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4982 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4983 return NO_REGS;
4985 return GENERAL_REGS;
4988 /* Values which must be returned in the most-significant end of the return
4989 register. */
4991 static bool
4992 arm_return_in_msb (tree valtype)
4994 return (TARGET_AAPCS_BASED
4995 && BYTES_BIG_ENDIAN
4996 && (AGGREGATE_TYPE_P (valtype)
4997 || TREE_CODE (valtype) == COMPLEX_TYPE));
5000 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
5001 Used by the Cirrus Maverick code, which has to work around
5002 a hardware bug triggered by such instructions. */
5003 static bool
5004 arm_memory_load_p (rtx insn)
5006 rtx body, lhs, rhs;
5008 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5009 return false;
5011 body = PATTERN (insn);
5013 if (GET_CODE (body) != SET)
5014 return false;
5016 lhs = XEXP (body, 0);
5017 rhs = XEXP (body, 1);
5019 lhs = REG_OR_SUBREG_RTX (lhs);
5021 /* If the destination is not a general purpose
5022 register we do not have to worry. */
5023 if (GET_CODE (lhs) != REG
5024 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5025 return false;
5027 /* As well as loads from memory we also have to react
5028 to loads of invalid constants which will be turned
5029 into loads from the minipool. */
5030 return (GET_CODE (rhs) == MEM
5031 || GET_CODE (rhs) == SYMBOL_REF
5032 || note_invalid_constants (insn, -1, false));
5035 /* Return TRUE if INSN is a Cirrus instruction. */
5036 static bool
5037 arm_cirrus_insn_p (rtx insn)
5039 enum attr_cirrus attr;
5041 /* get_attr cannot accept USE or CLOBBER. */
5042 if (!insn
5043 || GET_CODE (insn) != INSN
5044 || GET_CODE (PATTERN (insn)) == USE
5045 || GET_CODE (PATTERN (insn)) == CLOBBER)
5046 return 0;
5048 attr = get_attr_cirrus (insn);
5050 return attr != CIRRUS_NOT;
5053 /* Cirrus reorg for invalid instruction combinations. */
5054 static void
5055 cirrus_reorg (rtx first)
5057 enum attr_cirrus attr;
5058 rtx body = PATTERN (first);
5059 rtx t;
5060 int nops;
5062 /* Any branch must be followed by 2 non Cirrus instructions. */
5063 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5065 nops = 0;
5066 t = next_nonnote_insn (first);
5068 if (arm_cirrus_insn_p (t))
5069 ++ nops;
5071 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5072 ++ nops;
5074 while (nops --)
5075 emit_insn_after (gen_nop (), first);
5077 return;
5080 /* (float (blah)) is in parallel with a clobber. */
5081 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5082 body = XVECEXP (body, 0, 0);
5084 if (GET_CODE (body) == SET)
5086 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5088 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5089 be followed by a non Cirrus insn. */
5090 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5092 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5093 emit_insn_after (gen_nop (), first);
5095 return;
5097 else if (arm_memory_load_p (first))
5099 unsigned int arm_regno;
5101 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5102 ldr/cfmv64hr combination where the Rd field is the same
5103 in both instructions must be split with a non Cirrus
5104 insn. Example:
5106 ldr r0, blah
5108 cfmvsr mvf0, r0. */
5110 /* Get Arm register number for ldr insn. */
5111 if (GET_CODE (lhs) == REG)
5112 arm_regno = REGNO (lhs);
5113 else
5115 gcc_assert (GET_CODE (rhs) == REG);
5116 arm_regno = REGNO (rhs);
5119 /* Next insn. */
5120 first = next_nonnote_insn (first);
5122 if (! arm_cirrus_insn_p (first))
5123 return;
5125 body = PATTERN (first);
5127 /* (float (blah)) is in parallel with a clobber. */
5128 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5129 body = XVECEXP (body, 0, 0);
5131 if (GET_CODE (body) == FLOAT)
5132 body = XEXP (body, 0);
5134 if (get_attr_cirrus (first) == CIRRUS_MOVE
5135 && GET_CODE (XEXP (body, 1)) == REG
5136 && arm_regno == REGNO (XEXP (body, 1)))
5137 emit_insn_after (gen_nop (), first);
5139 return;
5143 /* get_attr cannot accept USE or CLOBBER. */
5144 if (!first
5145 || GET_CODE (first) != INSN
5146 || GET_CODE (PATTERN (first)) == USE
5147 || GET_CODE (PATTERN (first)) == CLOBBER)
5148 return;
5150 attr = get_attr_cirrus (first);
5152 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5153 must be followed by a non-coprocessor instruction. */
5154 if (attr == CIRRUS_COMPARE)
5156 nops = 0;
5158 t = next_nonnote_insn (first);
5160 if (arm_cirrus_insn_p (t))
5161 ++ nops;
5163 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5164 ++ nops;
5166 while (nops --)
5167 emit_insn_after (gen_nop (), first);
5169 return;
5173 /* Return TRUE if X references a SYMBOL_REF. */
5175 symbol_mentioned_p (rtx x)
5177 const char * fmt;
5178 int i;
5180 if (GET_CODE (x) == SYMBOL_REF)
5181 return 1;
5183 fmt = GET_RTX_FORMAT (GET_CODE (x));
5185 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5187 if (fmt[i] == 'E')
5189 int j;
5191 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5192 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5193 return 1;
5195 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5196 return 1;
5199 return 0;
5202 /* Return TRUE if X references a LABEL_REF. */
5204 label_mentioned_p (rtx x)
5206 const char * fmt;
5207 int i;
5209 if (GET_CODE (x) == LABEL_REF)
5210 return 1;
5212 fmt = GET_RTX_FORMAT (GET_CODE (x));
5213 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5215 if (fmt[i] == 'E')
5217 int j;
5219 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5220 if (label_mentioned_p (XVECEXP (x, i, j)))
5221 return 1;
5223 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5224 return 1;
5227 return 0;
5230 enum rtx_code
5231 minmax_code (rtx x)
5233 enum rtx_code code = GET_CODE (x);
5235 switch (code)
5237 case SMAX:
5238 return GE;
5239 case SMIN:
5240 return LE;
5241 case UMIN:
5242 return LEU;
5243 case UMAX:
5244 return GEU;
5245 default:
5246 gcc_unreachable ();
5250 /* Return 1 if memory locations are adjacent. */
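/* For instance (illustrative), (mem (reg r4)) and
   (mem (plus (reg r4) (const_int 4))) share a base register and differ by 4,
   so they are accepted below (subject to the eliminable-register and
   arm_ld_sched checks that follow).  */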
5252 adjacent_mem_locations (rtx a, rtx b)
5254 /* We don't guarantee to preserve the order of these memory refs. */
5255 if (volatile_refs_p (a) || volatile_refs_p (b))
5256 return 0;
5258 if ((GET_CODE (XEXP (a, 0)) == REG
5259 || (GET_CODE (XEXP (a, 0)) == PLUS
5260 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5261 && (GET_CODE (XEXP (b, 0)) == REG
5262 || (GET_CODE (XEXP (b, 0)) == PLUS
5263 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5265 HOST_WIDE_INT val0 = 0, val1 = 0;
5266 rtx reg0, reg1;
5267 int val_diff;
5269 if (GET_CODE (XEXP (a, 0)) == PLUS)
5271 reg0 = XEXP (XEXP (a, 0), 0);
5272 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5274 else
5275 reg0 = XEXP (a, 0);
5277 if (GET_CODE (XEXP (b, 0)) == PLUS)
5279 reg1 = XEXP (XEXP (b, 0), 0);
5280 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5282 else
5283 reg1 = XEXP (b, 0);
5285 /* Don't accept any offset that will require multiple
5286 instructions to handle, since this would cause the
5287 arith_adjacentmem pattern to output an overlong sequence. */
5288 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5289 return 0;
5291 /* Don't allow an eliminable register: register elimination can make
5292 the offset too large. */
5293 if (arm_eliminable_register (reg0))
5294 return 0;
5296 val_diff = val1 - val0;
5298 if (arm_ld_sched)
5300 /* If the target has load delay slots, then there's no benefit
5301 to using an ldm instruction unless the offset is zero and
5302 we are optimizing for size. */
5303 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5304 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5305 && (val_diff == 4 || val_diff == -4));
5308 return ((REGNO (reg0) == REGNO (reg1))
5309 && (val_diff == 4 || val_diff == -4));
5312 return 0;
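/* Analyze a candidate load-multiple.  OPERANDS holds NOPS destination
   registers followed by NOPS memory references.  When BASE is nonnull,
   also fill in REGS, *BASE and *LOAD_OFFSET.  The return value is the code
   used by emit_ldm_seq below: 1 for ldmia, 2 for ldmib, 3 for ldmda, 4 for
   ldmdb, 5 when the base register must first be adjusted with a separate
   add or sub, and 0 when no load-multiple is suitable.  */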
5316 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5317 HOST_WIDE_INT *load_offset)
5319 int unsorted_regs[4];
5320 HOST_WIDE_INT unsorted_offsets[4];
5321 int order[4];
5322 int base_reg = -1;
5323 int i;
5325 /* Can only handle 2, 3, or 4 insns at present,
5326 though could be easily extended if required. */
5327 gcc_assert (nops >= 2 && nops <= 4);
5329 /* Loop over the operands and check that the memory references are
5330 suitable (i.e. immediate offsets from the same base register). At
5331 the same time, extract the target register, and the memory
5332 offsets. */
5333 for (i = 0; i < nops; i++)
5335 rtx reg;
5336 rtx offset;
5338 /* Convert a subreg of a mem into the mem itself. */
5339 if (GET_CODE (operands[nops + i]) == SUBREG)
5340 operands[nops + i] = alter_subreg (operands + (nops + i));
5342 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5344 /* Don't reorder volatile memory references; it doesn't seem worth
5345 looking for the case where the order is ok anyway. */
5346 if (MEM_VOLATILE_P (operands[nops + i]))
5347 return 0;
5349 offset = const0_rtx;
5351 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5352 || (GET_CODE (reg) == SUBREG
5353 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5354 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5355 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5356 == REG)
5357 || (GET_CODE (reg) == SUBREG
5358 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5359 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5360 == CONST_INT)))
5362 if (i == 0)
5364 base_reg = REGNO (reg);
5365 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5366 ? REGNO (operands[i])
5367 : REGNO (SUBREG_REG (operands[i])));
5368 order[0] = 0;
5370 else
5372 if (base_reg != (int) REGNO (reg))
5373 /* Not addressed from the same base register. */
5374 return 0;
5376 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5377 ? REGNO (operands[i])
5378 : REGNO (SUBREG_REG (operands[i])));
5379 if (unsorted_regs[i] < unsorted_regs[order[0]])
5380 order[0] = i;
5383 /* If it isn't an integer register, or if it overwrites the
5384 base register but isn't the last insn in the list, then
5385 we can't do this. */
5386 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5387 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5388 return 0;
5390 unsorted_offsets[i] = INTVAL (offset);
5392 else
5393 /* Not a suitable memory address. */
5394 return 0;
5397 /* All the useful information has now been extracted from the
5398 operands into unsorted_regs and unsorted_offsets; additionally,
5399 order[0] has been set to the lowest numbered register in the
5400 list. Sort the registers into order, and check that the memory
5401 offsets are ascending and adjacent. */
5403 for (i = 1; i < nops; i++)
5405 int j;
5407 order[i] = order[i - 1];
5408 for (j = 0; j < nops; j++)
5409 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5410 && (order[i] == order[i - 1]
5411 || unsorted_regs[j] < unsorted_regs[order[i]]))
5412 order[i] = j;
5414 /* Have we found a suitable register? If not, one must be used more
5415 than once. */
5416 if (order[i] == order[i - 1])
5417 return 0;
5419 /* Is the memory address adjacent and ascending? */
5420 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5421 return 0;
5424 if (base)
5426 *base = base_reg;
5428 for (i = 0; i < nops; i++)
5429 regs[i] = unsorted_regs[order[i]];
5431 *load_offset = unsorted_offsets[order[0]];
5434 if (unsorted_offsets[order[0]] == 0)
5435 return 1; /* ldmia */
5437 if (unsorted_offsets[order[0]] == 4)
5438 return 2; /* ldmib */
5440 if (unsorted_offsets[order[nops - 1]] == 0)
5441 return 3; /* ldmda */
5443 if (unsorted_offsets[order[nops - 1]] == -4)
5444 return 4; /* ldmdb */
5446 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5447 if the offset isn't small enough. The reason 2 ldrs are faster
5448 is because these ARMs are able to do more than one cache access
5449 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5450 whilst the ARM8 has a double bandwidth cache. This means that
5451 these cores can do both an instruction fetch and a data fetch in
5452 a single cycle, so the trick of calculating the address into a
5453 scratch register (one of the result regs) and then doing a load
5454 multiple actually becomes slower (and no smaller in code size).
5455 That is the transformation
5457 ldr rd1, [rbase + offset]
5458 ldr rd2, [rbase + offset + 4]
     to
5462 add rd1, rbase, offset
5463 ldmia rd1, {rd1, rd2}
5465 produces worse code -- '3 cycles + any stalls on rd2' instead of
5466 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5467 access per cycle, the first sequence could never complete in less
5468 than 6 cycles, whereas the ldm sequence would only take 5 and
5469 would make better use of sequential accesses if not hitting the
5470 cache.
5472 We cheat here and test 'arm_ld_sched' which we currently know to
5473 only be true for the ARM8, ARM9 and StrongARM. If this ever
5474 changes, then the test below needs to be reworked. */
5475 if (nops == 2 && arm_ld_sched)
5476 return 0;
5478 /* Can't do it without setting up the offset; only do this if it takes
5479 no more than one insn. */
5480 return (const_ok_for_arm (unsorted_offsets[order[0]])
5481 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5484 const char *
5485 emit_ldm_seq (rtx *operands, int nops)
5487 int regs[4];
5488 int base_reg;
5489 HOST_WIDE_INT offset;
5490 char buf[100];
5491 int i;
5493 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5495 case 1:
5496 strcpy (buf, "ldm%?ia\t");
5497 break;
5499 case 2:
5500 strcpy (buf, "ldm%?ib\t");
5501 break;
5503 case 3:
5504 strcpy (buf, "ldm%?da\t");
5505 break;
5507 case 4:
5508 strcpy (buf, "ldm%?db\t");
5509 break;
5511 case 5:
5512 if (offset >= 0)
5513 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5514 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5515 (long) offset);
5516 else
5517 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5518 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5519 (long) -offset);
5520 output_asm_insn (buf, operands);
5521 base_reg = regs[0];
5522 strcpy (buf, "ldm%?ia\t");
5523 break;
5525 default:
5526 gcc_unreachable ();
5529 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5530 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5532 for (i = 1; i < nops; i++)
5533 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5534 reg_names[regs[i]]);
5536 strcat (buf, "}\t%@ phole ldm");
5538 output_asm_insn (buf, operands);
5539 return "";
5543 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5544 HOST_WIDE_INT * load_offset)
5546 int unsorted_regs[4];
5547 HOST_WIDE_INT unsorted_offsets[4];
5548 int order[4];
5549 int base_reg = -1;
5550 int i;
5552 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5553 extended if required. */
5554 gcc_assert (nops >= 2 && nops <= 4);
5556 /* Loop over the operands and check that the memory references are
5557 suitable (i.e. immediate offsets from the same base register). At
5558 the same time, extract the target register, and the memory
5559 offsets. */
5560 for (i = 0; i < nops; i++)
5562 rtx reg;
5563 rtx offset;
5565 /* Convert a subreg of a mem into the mem itself. */
5566 if (GET_CODE (operands[nops + i]) == SUBREG)
5567 operands[nops + i] = alter_subreg (operands + (nops + i));
5569 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
5571 /* Don't reorder volatile memory references; it doesn't seem worth
5572 looking for the case where the order is ok anyway. */
5573 if (MEM_VOLATILE_P (operands[nops + i]))
5574 return 0;
5576 offset = const0_rtx;
5578 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5579 || (GET_CODE (reg) == SUBREG
5580 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5581 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5582 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5583 == REG)
5584 || (GET_CODE (reg) == SUBREG
5585 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5586 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5587 == CONST_INT)))
5589 if (i == 0)
5591 base_reg = REGNO (reg);
5592 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5593 ? REGNO (operands[i])
5594 : REGNO (SUBREG_REG (operands[i])));
5595 order[0] = 0;
5597 else
5599 if (base_reg != (int) REGNO (reg))
5600 /* Not addressed from the same base register. */
5601 return 0;
5603 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5604 ? REGNO (operands[i])
5605 : REGNO (SUBREG_REG (operands[i])));
5606 if (unsorted_regs[i] < unsorted_regs[order[0]])
5607 order[0] = i;
5610 /* If it isn't an integer register, then we can't do this. */
5611 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5612 return 0;
5614 unsorted_offsets[i] = INTVAL (offset);
5616 else
5617 /* Not a suitable memory address. */
5618 return 0;
5621 /* All the useful information has now been extracted from the
5622 operands into unsorted_regs and unsorted_offsets; additionally,
5623 order[0] has been set to the lowest numbered register in the
5624 list. Sort the registers into order, and check that the memory
5625 offsets are ascending and adjacent. */
5627 for (i = 1; i < nops; i++)
5629 int j;
5631 order[i] = order[i - 1];
5632 for (j = 0; j < nops; j++)
5633 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5634 && (order[i] == order[i - 1]
5635 || unsorted_regs[j] < unsorted_regs[order[i]]))
5636 order[i] = j;
5638 /* Have we found a suitable register? If not, one must be used more
5639 than once. */
5640 if (order[i] == order[i - 1])
5641 return 0;
5643 /* Is the memory address adjacent and ascending? */
5644 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5645 return 0;
5648 if (base)
5650 *base = base_reg;
5652 for (i = 0; i < nops; i++)
5653 regs[i] = unsorted_regs[order[i]];
5655 *load_offset = unsorted_offsets[order[0]];
5658 if (unsorted_offsets[order[0]] == 0)
5659 return 1; /* stmia */
5661 if (unsorted_offsets[order[0]] == 4)
5662 return 2; /* stmib */
5664 if (unsorted_offsets[order[nops - 1]] == 0)
5665 return 3; /* stmda */
5667 if (unsorted_offsets[order[nops - 1]] == -4)
5668 return 4; /* stmdb */
5670 return 0;
5673 const char *
5674 emit_stm_seq (rtx *operands, int nops)
5676 int regs[4];
5677 int base_reg;
5678 HOST_WIDE_INT offset;
5679 char buf[100];
5680 int i;
5682 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5684 case 1:
5685 strcpy (buf, "stm%?ia\t");
5686 break;
5688 case 2:
5689 strcpy (buf, "stm%?ib\t");
5690 break;
5692 case 3:
5693 strcpy (buf, "stm%?da\t");
5694 break;
5696 case 4:
5697 strcpy (buf, "stm%?db\t");
5698 break;
5700 default:
5701 gcc_unreachable ();
5704 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5705 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5707 for (i = 1; i < nops; i++)
5708 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5709 reg_names[regs[i]]);
5711 strcat (buf, "}\t%@ phole stm");
5713 output_asm_insn (buf, operands);
5714 return "";
5718 /* Routines for use in generating RTL. */
5721 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5722 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5724 HOST_WIDE_INT offset = *offsetp;
5725 int i = 0, j;
5726 rtx result;
5727 int sign = up ? 1 : -1;
5728 rtx mem, addr;
5730 /* XScale has load-store double instructions, but they have stricter
5731 alignment requirements than load-store multiple, so we cannot
5732 use them.
5734 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5735 the pipeline until completion.
5737 NREGS CYCLES
  1 3
  2 4
  3 5
  4 6
5743 An ldr instruction takes 1-3 cycles, but does not block the
5744 pipeline.
5746 NREGS CYCLES
5747 1 1-3
5748 2 2-6
5749 3 3-9
5750 4 4-12
5752 Best case ldr will always win. However, the more ldr instructions
5753 we issue, the less likely we are to be able to schedule them well.
5754 Using ldr instructions also increases code size.
5756 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5757 for counts of 3 or 4 regs. */
5758 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5760 rtx seq;
5762 start_sequence ();
5764 for (i = 0; i < count; i++)
5766 addr = plus_constant (from, i * 4 * sign);
5767 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5768 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5769 offset += 4 * sign;
5772 if (write_back)
5774 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5775 *offsetp = offset;
5778 seq = get_insns ();
5779 end_sequence ();
5781 return seq;
5784 result = gen_rtx_PARALLEL (VOIDmode,
5785 rtvec_alloc (count + (write_back ? 1 : 0)));
5786 if (write_back)
5788 XVECEXP (result, 0, 0)
5789 = gen_rtx_SET (GET_MODE (from), from,
5790 plus_constant (from, count * 4 * sign));
5791 i = 1;
5792 count++;
5795 for (j = 0; i < count; i++, j++)
5797 addr = plus_constant (from, j * 4 * sign);
5798 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5799 XVECEXP (result, 0, i)
5800 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5801 offset += 4 * sign;
5804 if (write_back)
5805 *offsetp = offset;
5807 return result;
5811 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5812 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5814 HOST_WIDE_INT offset = *offsetp;
5815 int i = 0, j;
5816 rtx result;
5817 int sign = up ? 1 : -1;
5818 rtx mem, addr;
5820 /* See arm_gen_load_multiple for discussion of
5821 the pros/cons of ldm/stm usage for XScale. */
5822 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5824 rtx seq;
5826 start_sequence ();
5828 for (i = 0; i < count; i++)
5830 addr = plus_constant (to, i * 4 * sign);
5831 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5832 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5833 offset += 4 * sign;
5836 if (write_back)
5838 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5839 *offsetp = offset;
5842 seq = get_insns ();
5843 end_sequence ();
5845 return seq;
5848 result = gen_rtx_PARALLEL (VOIDmode,
5849 rtvec_alloc (count + (write_back ? 1 : 0)));
5850 if (write_back)
5852 XVECEXP (result, 0, 0)
5853 = gen_rtx_SET (GET_MODE (to), to,
5854 plus_constant (to, count * 4 * sign));
5855 i = 1;
5856 count++;
5859 for (j = 0; i < count; i++, j++)
5861 addr = plus_constant (to, j * 4 * sign);
5862 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5863 XVECEXP (result, 0, i)
5864 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5865 offset += 4 * sign;
5868 if (write_back)
5869 *offsetp = offset;
5871 return result;
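/* Expand a constant-length block copy (movmemqi).  OPERANDS[0] and
   OPERANDS[1] are the destination and source memory references, OPERANDS[2]
   the byte count and OPERANDS[3] the alignment.  Only counts of at most 64
   bytes with an alignment that is a multiple of four are handled here;
   return 0 otherwise, and 1 once the copy has been emitted.  */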
5875 arm_gen_movmemqi (rtx *operands)
5877 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5878 HOST_WIDE_INT srcoffset, dstoffset;
5879 int i;
5880 rtx src, dst, srcbase, dstbase;
5881 rtx part_bytes_reg = NULL;
5882 rtx mem;
5884 if (GET_CODE (operands[2]) != CONST_INT
5885 || GET_CODE (operands[3]) != CONST_INT
5886 || INTVAL (operands[2]) > 64
5887 || INTVAL (operands[3]) & 3)
5888 return 0;
5890 dstbase = operands[0];
5891 srcbase = operands[1];
5893 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5894 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5896 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5897 out_words_to_go = INTVAL (operands[2]) / 4;
5898 last_bytes = INTVAL (operands[2]) & 3;
5899 dstoffset = srcoffset = 0;
5901 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5902 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5904 for (i = 0; in_words_to_go >= 2; i+=4)
5906 if (in_words_to_go > 4)
5907 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5908 srcbase, &srcoffset));
5909 else
5910 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5911 FALSE, srcbase, &srcoffset));
5913 if (out_words_to_go)
5915 if (out_words_to_go > 4)
5916 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5917 dstbase, &dstoffset));
5918 else if (out_words_to_go != 1)
5919 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5920 dst, TRUE,
5921 (last_bytes == 0
5922 ? FALSE : TRUE),
5923 dstbase, &dstoffset));
5924 else
5926 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5927 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5928 if (last_bytes != 0)
5930 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5931 dstoffset += 4;
5936 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5937 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5940 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5941 if (out_words_to_go)
5943 rtx sreg;
5945 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5946 sreg = copy_to_reg (mem);
5948 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5949 emit_move_insn (mem, sreg);
5950 in_words_to_go--;
5952 gcc_assert (!in_words_to_go); /* Sanity check */
5955 if (in_words_to_go)
5957 gcc_assert (in_words_to_go > 0);
5959 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5960 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5963 gcc_assert (!last_bytes || part_bytes_reg);
5965 if (BYTES_BIG_ENDIAN && last_bytes)
5967 rtx tmp = gen_reg_rtx (SImode);
5969 /* The bytes we want are in the top end of the word. */
5970 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5971 GEN_INT (8 * (4 - last_bytes))));
5972 part_bytes_reg = tmp;
5974 while (last_bytes)
5976 mem = adjust_automodify_address (dstbase, QImode,
5977 plus_constant (dst, last_bytes - 1),
5978 dstoffset + last_bytes - 1);
5979 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5981 if (--last_bytes)
5983 tmp = gen_reg_rtx (SImode);
5984 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5985 part_bytes_reg = tmp;
5990 else
5992 if (last_bytes > 1)
5994 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5995 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5996 last_bytes -= 2;
5997 if (last_bytes)
5999 rtx tmp = gen_reg_rtx (SImode);
6000 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6001 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6002 part_bytes_reg = tmp;
6003 dstoffset += 2;
6007 if (last_bytes)
6009 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6010 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6014 return 1;
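/* Worked example (illustrative): for a 14-byte copy with word-aligned
   operands (operands[2] == 14, operands[3] == 4) we get
   in_words_to_go == 4, out_words_to_go == 3 and last_bytes == 2, so on a
   little-endian target the expansion is roughly

	ldmia	src, {r0, r1, r2, r3}	@ load 4 whole words (16 bytes read)
	stmia	dst!, {r0, r1, r2}	@ store the 3 complete words
	strh	r3, [dst]		@ store the trailing half-word

   Big-endian targets instead shift the wanted bytes down from the top of
   the last register and store them a byte at a time.  */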
6017 /* Generate a memory reference for a half word, such that it will be loaded
6018 into the top 16 bits of the word. We can assume that the address is
6019 known to be alignable and of the form reg, or plus (reg, const). */
6022 arm_gen_rotated_half_load (rtx memref)
6024 HOST_WIDE_INT offset = 0;
6025 rtx base = XEXP (memref, 0);
6027 if (GET_CODE (base) == PLUS)
6029 offset = INTVAL (XEXP (base, 1));
6030 base = XEXP (base, 0);
6033 /* If we aren't allowed to generate unaligned addresses, then fail. */
6034 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6035 return NULL;
6037 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6039 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6040 return base;
6042 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
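/* Illustrative example: on a little-endian target a reference to the
   half-word at [r1, #6] becomes a plain SImode load of the word at
   [r1, #4]; that half-word already occupies bits 16-31 of the loaded
   word, so the MEM is returned unrotated.  A reference such as [r1, #4]
   (bit 1 of the offset clear) fails the check above and NULL is
   returned.  */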
6045 /* Select a dominance comparison mode if possible for a test of the general
6046 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6047 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6048 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6049 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6050 In all cases OP will be either EQ or NE, but we don't need to know which
6051 here. If we are unable to support a dominance comparison we return
6052 CC mode. This will then fail to match for the RTL expressions that
6053 generate this call. */
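/* For example (illustrative): a test such as
     (ne (ior (lt r0 r1) (le r2 r3)) (const_int 0))
   reaches this function with COND_OR == DOM_CC_X_OR_Y.  LT being true
   implies LE is true, so the switch below returns CC_DLEmode and both
   comparisons can be combined into one compare/conditional-compare
   sequence.  */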
6054 enum machine_mode
6055 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6057 enum rtx_code cond1, cond2;
6058 int swapped = 0;
6060 /* Currently we will probably get the wrong result if the individual
6061 comparisons are not simple. This also ensures that it is safe to
6062 reverse a comparison if necessary. */
6063 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6064 != CCmode)
6065 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6066 != CCmode))
6067 return CCmode;
6069 /* The if_then_else variant of this tests the second condition if the
6070 first passes, but is true if the first fails. Reverse the first
6071 condition to get a true "inclusive-or" expression. */
6072 if (cond_or == DOM_CC_NX_OR_Y)
6073 cond1 = reverse_condition (cond1);
6075 /* If the comparisons are not equal, and one doesn't dominate the other,
6076 then we can't do this. */
6077 if (cond1 != cond2
6078 && !comparison_dominates_p (cond1, cond2)
6079 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6080 return CCmode;
6082 if (swapped)
6084 enum rtx_code temp = cond1;
6085 cond1 = cond2;
6086 cond2 = temp;
6089 switch (cond1)
6091 case EQ:
6092 if (cond_or == DOM_CC_X_AND_Y)
6093 return CC_DEQmode;
6095 switch (cond2)
6097 case EQ: return CC_DEQmode;
6098 case LE: return CC_DLEmode;
6099 case LEU: return CC_DLEUmode;
6100 case GE: return CC_DGEmode;
6101 case GEU: return CC_DGEUmode;
6102 default: gcc_unreachable ();
6105 case LT:
6106 if (cond_or == DOM_CC_X_AND_Y)
6107 return CC_DLTmode;
6109 switch (cond2)
6111 case LT:
6112 return CC_DLTmode;
6113 case LE:
6114 return CC_DLEmode;
6115 case NE:
6116 return CC_DNEmode;
6117 default:
6118 gcc_unreachable ();
6121 case GT:
6122 if (cond_or == DOM_CC_X_AND_Y)
6123 return CC_DGTmode;
6125 switch (cond2)
6127 case GT:
6128 return CC_DGTmode;
6129 case GE:
6130 return CC_DGEmode;
6131 case NE:
6132 return CC_DNEmode;
6133 default:
6134 gcc_unreachable ();
6137 case LTU:
6138 if (cond_or == DOM_CC_X_AND_Y)
6139 return CC_DLTUmode;
6141 switch (cond2)
6143 case LTU:
6144 return CC_DLTUmode;
6145 case LEU:
6146 return CC_DLEUmode;
6147 case NE:
6148 return CC_DNEmode;
6149 default:
6150 gcc_unreachable ();
6153 case GTU:
6154 if (cond_or == DOM_CC_X_AND_Y)
6155 return CC_DGTUmode;
6157 switch (cond2)
6159 case GTU:
6160 return CC_DGTUmode;
6161 case GEU:
6162 return CC_DGEUmode;
6163 case NE:
6164 return CC_DNEmode;
6165 default:
6166 gcc_unreachable ();
6169 /* The remaining cases only occur when both comparisons are the
6170 same. */
6171 case NE:
6172 gcc_assert (cond1 == cond2);
6173 return CC_DNEmode;
6175 case LE:
6176 gcc_assert (cond1 == cond2);
6177 return CC_DLEmode;
6179 case GE:
6180 gcc_assert (cond1 == cond2);
6181 return CC_DGEmode;
6183 case LEU:
6184 gcc_assert (cond1 == cond2);
6185 return CC_DLEUmode;
6187 case GEU:
6188 gcc_assert (cond1 == cond2);
6189 return CC_DGEUmode;
6191 default:
6192 gcc_unreachable ();
6196 enum machine_mode
6197 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6199 /* All floating point compares return CCFP if it is an equality
6200 comparison, and CCFPE otherwise. */
6201 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6203 switch (op)
6205 case EQ:
6206 case NE:
6207 case UNORDERED:
6208 case ORDERED:
6209 case UNLT:
6210 case UNLE:
6211 case UNGT:
6212 case UNGE:
6213 case UNEQ:
6214 case LTGT:
6215 return CCFPmode;
6217 case LT:
6218 case LE:
6219 case GT:
6220 case GE:
6221 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6222 return CCFPmode;
6223 return CCFPEmode;
6225 default:
6226 gcc_unreachable ();
6230 /* A compare with a shifted operand. Because of canonicalization, the
6231 comparison will have to be swapped when we emit the assembler. */
6232 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6233 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6234 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6235 || GET_CODE (x) == ROTATERT))
6236 return CC_SWPmode;
6238 /* This operation is performed swapped, but since we only rely on the Z
6239 flag we don't need an additional mode. */
6240 if (GET_MODE (y) == SImode && REG_P (y)
6241 && GET_CODE (x) == NEG
6242 && (op == EQ || op == NE))
6243 return CC_Zmode;
6245 /* This is a special case that is used by combine to allow a
6246 comparison of a shifted byte load to be split into a zero-extend
6247 followed by a comparison of the shifted integer (only valid for
6248 equalities and unsigned inequalities). */
6249 if (GET_MODE (x) == SImode
6250 && GET_CODE (x) == ASHIFT
6251 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6252 && GET_CODE (XEXP (x, 0)) == SUBREG
6253 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6254 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6255 && (op == EQ || op == NE
6256 || op == GEU || op == GTU || op == LTU || op == LEU)
6257 && GET_CODE (y) == CONST_INT)
6258 return CC_Zmode;
6260 /* A construct for a conditional compare, if the false arm contains
6261 0, then both conditions must be true, otherwise either condition
6262 must be true. Not all conditions are possible, so CCmode is
6263 returned if it can't be done. */
6264 if (GET_CODE (x) == IF_THEN_ELSE
6265 && (XEXP (x, 2) == const0_rtx
6266 || XEXP (x, 2) == const1_rtx)
6267 && COMPARISON_P (XEXP (x, 0))
6268 && COMPARISON_P (XEXP (x, 1)))
6269 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6270 INTVAL (XEXP (x, 2)));
6272 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6273 if (GET_CODE (x) == AND
6274 && COMPARISON_P (XEXP (x, 0))
6275 && COMPARISON_P (XEXP (x, 1)))
6276 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6277 DOM_CC_X_AND_Y);
6279 if (GET_CODE (x) == IOR
6280 && COMPARISON_P (XEXP (x, 0))
6281 && COMPARISON_P (XEXP (x, 1)))
6282 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6283 DOM_CC_X_OR_Y);
6285 /* An operation (on Thumb) where we want to test for a single bit.
6286 This is done by shifting that bit up into the top bit of a
6287 scratch register; we can then branch on the sign bit. */
6288 if (TARGET_THUMB
6289 && GET_MODE (x) == SImode
6290 && (op == EQ || op == NE)
6291 && (GET_CODE (x) == ZERO_EXTRACT))
6292 return CC_Nmode;
6294 /* An operation that sets the condition codes as a side-effect, the
6295 V flag is not set correctly, so we can only use comparisons where
6296 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6297 instead.) */
6298 if (GET_MODE (x) == SImode
6299 && y == const0_rtx
6300 && (op == EQ || op == NE || op == LT || op == GE)
6301 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6302 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6303 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6304 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6305 || GET_CODE (x) == LSHIFTRT
6306 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6307 || GET_CODE (x) == ROTATERT
6308 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6309 return CC_NOOVmode;
6311 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6312 return CC_Zmode;
6314 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6315 && GET_CODE (x) == PLUS
6316 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6317 return CC_Cmode;
6319 return CCmode;
6322 /* X and Y are two things to compare using CODE. Emit the compare insn and
6323 return the rtx for register 0 in the proper mode. FP means this is a
6324 floating point compare: I don't think that it is needed on the arm. */
6326 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6328 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6329 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6331 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6332 gen_rtx_COMPARE (mode, x, y)));
6334 return cc_reg;
6337 /* Generate a sequence of insns that will generate the correct return
6338 address mask depending on the physical architecture that the program
6339 is running on. */
6341 arm_gen_return_addr_mask (void)
6343 rtx reg = gen_reg_rtx (Pmode);
6345 emit_insn (gen_return_addr_mask (reg));
6346 return reg;
6349 void
6350 arm_reload_in_hi (rtx *operands)
6352 rtx ref = operands[1];
6353 rtx base, scratch;
6354 HOST_WIDE_INT offset = 0;
6356 if (GET_CODE (ref) == SUBREG)
6358 offset = SUBREG_BYTE (ref);
6359 ref = SUBREG_REG (ref);
6362 if (GET_CODE (ref) == REG)
6364 /* We have a pseudo which has been spilt onto the stack; there
6365 are two cases here: the first where there is a simple
6366 stack-slot replacement and a second where the stack-slot is
6367 out of range, or is used as a subreg. */
6368 if (reg_equiv_mem[REGNO (ref)])
6370 ref = reg_equiv_mem[REGNO (ref)];
6371 base = find_replacement (&XEXP (ref, 0));
6373 else
6374 /* The slot is out of range, or was dressed up in a SUBREG. */
6375 base = reg_equiv_address[REGNO (ref)];
6377 else
6378 base = find_replacement (&XEXP (ref, 0));
6380 /* Handle the case where the address is too complex to be offset by 1. */
6381 if (GET_CODE (base) == MINUS
6382 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6384 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6386 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6387 base = base_plus;
6389 else if (GET_CODE (base) == PLUS)
6391 /* The addend must be CONST_INT, or we would have dealt with it above. */
6392 HOST_WIDE_INT hi, lo;
6394 offset += INTVAL (XEXP (base, 1));
6395 base = XEXP (base, 0);
6397 /* Rework the address into a legal sequence of insns. */
6398 /* Valid range for lo is -4095 -> 4095 */
6399 lo = (offset >= 0
6400 ? (offset & 0xfff)
6401 : -((-offset) & 0xfff));
6403 /* Corner case: if lo is the max offset then we would be out of range
6404 once we have added the additional 1 below, so bump the msb into the
6405 pre-loading insn(s). */
6406 if (lo == 4095)
6407 lo &= 0x7ff;
6409 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6410 ^ (HOST_WIDE_INT) 0x80000000)
6411 - (HOST_WIDE_INT) 0x80000000);
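       /* Worked example (illustrative): offset == 0x1236 gives lo == 0x236
	  and hi == 0x1000, and 0x1000 + 0x236 == 0x1236.  For the corner
	  case offset == 4095, lo is clipped to 0x7ff (2047) so that the
	  second byte access at lo + 1 == 2048 is still a legal offset, and
	  hi becomes 2048.  */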
6413 gcc_assert (hi + lo == offset);
6415 if (hi != 0)
6417 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6419 /* Get the base address; addsi3 knows how to handle constants
6420 that require more than one insn. */
6421 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6422 base = base_plus;
6423 offset = lo;
6427 /* Operands[2] may overlap operands[0] (though it won't overlap
6428 operands[1]), that's why we asked for a DImode reg -- so we can
6429 use the bit that does not overlap. */
6430 if (REGNO (operands[2]) == REGNO (operands[0]))
6431 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6432 else
6433 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6435 emit_insn (gen_zero_extendqisi2 (scratch,
6436 gen_rtx_MEM (QImode,
6437 plus_constant (base,
6438 offset))));
6439 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6440 gen_rtx_MEM (QImode,
6441 plus_constant (base,
6442 offset + 1))));
6443 if (!BYTES_BIG_ENDIAN)
6444 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6445 gen_rtx_IOR (SImode,
6446 gen_rtx_ASHIFT
6447 (SImode,
6448 gen_rtx_SUBREG (SImode, operands[0], 0),
6449 GEN_INT (8)),
6450 scratch)));
6451 else
6452 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6453 gen_rtx_IOR (SImode,
6454 gen_rtx_ASHIFT (SImode, scratch,
6455 GEN_INT (8)),
6456 gen_rtx_SUBREG (SImode, operands[0],
6457 0))));
6460 /* Handle storing a half-word to memory during reload by synthesizing as two
6461 byte stores. Take care not to clobber the input values until after we
6462 have moved them somewhere safe. This code assumes that if the DImode
6463 scratch in operands[2] overlaps either the input value or output address
6464 in some way, then that value must die in this insn (we absolutely need
6465 two scratch registers for some corner cases). */
6466 void
6467 arm_reload_out_hi (rtx *operands)
6469 rtx ref = operands[0];
6470 rtx outval = operands[1];
6471 rtx base, scratch;
6472 HOST_WIDE_INT offset = 0;
6474 if (GET_CODE (ref) == SUBREG)
6476 offset = SUBREG_BYTE (ref);
6477 ref = SUBREG_REG (ref);
6480 if (GET_CODE (ref) == REG)
6482 /* We have a pseudo which has been spilt onto the stack; there
6483 are two cases here: the first where there is a simple
6484 stack-slot replacement and a second where the stack-slot is
6485 out of range, or is used as a subreg. */
6486 if (reg_equiv_mem[REGNO (ref)])
6488 ref = reg_equiv_mem[REGNO (ref)];
6489 base = find_replacement (&XEXP (ref, 0));
6491 else
6492 /* The slot is out of range, or was dressed up in a SUBREG. */
6493 base = reg_equiv_address[REGNO (ref)];
6495 else
6496 base = find_replacement (&XEXP (ref, 0));
6498 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6500 /* Handle the case where the address is too complex to be offset by 1. */
6501 if (GET_CODE (base) == MINUS
6502 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6504 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6506 /* Be careful not to destroy OUTVAL. */
6507 if (reg_overlap_mentioned_p (base_plus, outval))
6509 /* Updating base_plus might destroy outval, see if we can
6510 swap the scratch and base_plus. */
6511 if (!reg_overlap_mentioned_p (scratch, outval))
6513 rtx tmp = scratch;
6514 scratch = base_plus;
6515 base_plus = tmp;
6517 else
6519 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6521 /* Be conservative and copy OUTVAL into the scratch now,
6522 this should only be necessary if outval is a subreg
6523 of something larger than a word. */
6524 /* XXX Might this clobber base? I can't see how it can,
6525 since scratch is known to overlap with OUTVAL, and
6526 must be wider than a word. */
6527 emit_insn (gen_movhi (scratch_hi, outval));
6528 outval = scratch_hi;
6532 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6533 base = base_plus;
6535 else if (GET_CODE (base) == PLUS)
6537 /* The addend must be CONST_INT, or we would have dealt with it above. */
6538 HOST_WIDE_INT hi, lo;
6540 offset += INTVAL (XEXP (base, 1));
6541 base = XEXP (base, 0);
6543 /* Rework the address into a legal sequence of insns. */
6544 /* Valid range for lo is -4095 -> 4095 */
6545 lo = (offset >= 0
6546 ? (offset & 0xfff)
6547 : -((-offset) & 0xfff));
6549 /* Corner case: if lo is the max offset then we would be out of range
6550 once we have added the additional 1 below, so bump the msb into the
6551 pre-loading insn(s). */
6552 if (lo == 4095)
6553 lo &= 0x7ff;
6555 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6556 ^ (HOST_WIDE_INT) 0x80000000)
6557 - (HOST_WIDE_INT) 0x80000000);
6559 gcc_assert (hi + lo == offset);
6561 if (hi != 0)
6563 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6565 /* Be careful not to destroy OUTVAL. */
6566 if (reg_overlap_mentioned_p (base_plus, outval))
6568 /* Updating base_plus might destroy outval, see if we
6569 can swap the scratch and base_plus. */
6570 if (!reg_overlap_mentioned_p (scratch, outval))
6572 rtx tmp = scratch;
6573 scratch = base_plus;
6574 base_plus = tmp;
6576 else
6578 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6580 /* Be conservative and copy outval into scratch now,
6581 this should only be necessary if outval is a
6582 subreg of something larger than a word. */
6583 /* XXX Might this clobber base? I can't see how it
6584 can, since scratch is known to overlap with
6585 outval. */
6586 emit_insn (gen_movhi (scratch_hi, outval));
6587 outval = scratch_hi;
6591 /* Get the base address; addsi3 knows how to handle constants
6592 that require more than one insn. */
6593 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6594 base = base_plus;
6595 offset = lo;
6599 if (BYTES_BIG_ENDIAN)
6601 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6602 plus_constant (base, offset + 1)),
6603 gen_lowpart (QImode, outval)));
6604 emit_insn (gen_lshrsi3 (scratch,
6605 gen_rtx_SUBREG (SImode, outval, 0),
6606 GEN_INT (8)));
6607 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6608 gen_lowpart (QImode, scratch)));
6610 else
6612 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6613 gen_lowpart (QImode, outval)));
6614 emit_insn (gen_lshrsi3 (scratch,
6615 gen_rtx_SUBREG (SImode, outval, 0),
6616 GEN_INT (8)));
6617 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6618 plus_constant (base, offset + 1)),
6619 gen_lowpart (QImode, scratch)));
6623 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
6624 (padded to the size of a word) should be passed in a register. */
6626 static bool
6627 arm_must_pass_in_stack (enum machine_mode mode, tree type)
6629 if (TARGET_AAPCS_BASED)
6630 return must_pass_in_stack_var_size (mode, type);
6631 else
6632 return must_pass_in_stack_var_size_or_pad (mode, type);
6636 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6637 Return true if an argument passed on the stack should be padded upwards,
6638 i.e. if the least-significant byte has useful data. */
6640 bool
6641 arm_pad_arg_upward (enum machine_mode mode, tree type)
6643 if (!TARGET_AAPCS_BASED)
6644 return DEFAULT_FUNCTION_ARG_PADDING(mode, type);
6646 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
6647 return false;
6649 return true;
6653 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6654 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6655 byte of the register has useful data, and return the opposite if the
6656 most significant byte does.
6657 For AAPCS, small aggregates and small complex types are always padded
6658 upwards. */
6660 bool
6661 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
6662 tree type, int first ATTRIBUTE_UNUSED)
6664 if (TARGET_AAPCS_BASED
6665 && BYTES_BIG_ENDIAN
6666 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
6667 && int_size_in_bytes (type) <= 4)
6668 return true;
6670 /* Otherwise, use default padding. */
6671 return !BYTES_BIG_ENDIAN;
6676 /* Print a symbolic form of X to the debug file, F. */
6677 static void
6678 arm_print_value (FILE *f, rtx x)
6680 switch (GET_CODE (x))
6682 case CONST_INT:
6683 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6684 return;
6686 case CONST_DOUBLE:
6687 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6688 return;
6690 case CONST_VECTOR:
6692 int i;
6694 fprintf (f, "<");
6695 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6697 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6698 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6699 fputc (',', f);
6701 fprintf (f, ">");
6703 return;
6705 case CONST_STRING:
6706 fprintf (f, "\"%s\"", XSTR (x, 0));
6707 return;
6709 case SYMBOL_REF:
6710 fprintf (f, "`%s'", XSTR (x, 0));
6711 return;
6713 case LABEL_REF:
6714 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6715 return;
6717 case CONST:
6718 arm_print_value (f, XEXP (x, 0));
6719 return;
6721 case PLUS:
6722 arm_print_value (f, XEXP (x, 0));
6723 fprintf (f, "+");
6724 arm_print_value (f, XEXP (x, 1));
6725 return;
6727 case PC:
6728 fprintf (f, "pc");
6729 return;
6731 default:
6732 fprintf (f, "????");
6733 return;
6737 /* Routines for manipulation of the constant pool. */
6739 /* Arm instructions cannot load a large constant directly into a
6740 register; they have to come from a pc relative load. The constant
6741 must therefore be placed in the addressable range of the pc
6742 relative load. Depending on the precise pc relative load
6743 instruction the range is somewhere between 256 bytes and 4k. This
6744 means that we often have to dump a constant inside a function, and
6745 generate code to branch around it.
6747 It is important to minimize this, since the branches will slow
6748 things down and make the code larger.
6750 Normally we can hide the table after an existing unconditional
6751 branch so that there is no interruption of the flow, but in the
6752 worst case the code looks like this:
6754 ldr rn, L1
6756 b L2
6757 align
6758 L1: .long value
6762 ldr rn, L3
6764 b L4
6765 align
6766 L3: .long value
6770 We fix this by performing a scan after scheduling, which notices
6771 which instructions need to have their operands fetched from the
6772 constant table and builds the table.
6774 The algorithm starts by building a table of all the constants that
6775 need fixing up and all the natural barriers in the function (places
6776 where a constant table can be dropped without breaking the flow).
6777 For each fixup we note how far the pc-relative replacement will be
6778 able to reach and the offset of the instruction into the function.
6780 Having built the table we then group the fixes together to form
6781 tables that are as large as possible (subject to addressing
6782 constraints) and emit each table of constants after the last
6783 barrier that is within range of all the instructions in the group.
6784 If a group does not contain a barrier, then we forcibly create one
6785 by inserting a jump instruction into the flow. Once the table has
6786 been inserted, the insns are then modified to reference the
6787 relevant entry in the pool.
6789 Possible enhancements to the algorithm (not implemented) are:
6791 1) For some processors and object formats, there may be benefit in
6792 aligning the pools to the start of cache lines; this alignment
6793 would need to be taken into account when calculating addressability
6794 of a pool. */
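/* Illustrative numbers: an ARM word load of the form ldr rN, [pc, #imm]
   can reach roughly 4k bytes forward of the instruction, so a fix
   recorded at function offset 0x400 gets a forward range ending near
   offset 0x1400; any pool holding its constant must be dumped before
   that point.  The grouping performed below tries to share one pool
   between all the fixes whose ranges overlap.  */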
6796 /* These typedefs are located at the start of this file, so that
6797 they can be used in the prototypes there. This comment is to
6798 remind readers of that fact so that the following structures
6799 can be understood more easily.
6801 typedef struct minipool_node Mnode;
6802 typedef struct minipool_fixup Mfix; */
6804 struct minipool_node
6806 /* Doubly linked chain of entries. */
6807 Mnode * next;
6808 Mnode * prev;
6809 /* The maximum offset into the code at which this entry can be placed. While
6810 pushing fixes for forward references, all entries are sorted in order
6811 of increasing max_address. */
6812 HOST_WIDE_INT max_address;
6813 /* Similarly for an entry inserted for a backwards ref. */
6814 HOST_WIDE_INT min_address;
6815 /* The number of fixes referencing this entry. This can become zero
6816 if we "unpush" an entry. In this case we ignore the entry when we
6817 come to emit the code. */
6818 int refcount;
6819 /* The offset from the start of the minipool. */
6820 HOST_WIDE_INT offset;
6821 /* The value in the table. */
6822 rtx value;
6823 /* The mode of value. */
6824 enum machine_mode mode;
6825 /* The size of the value. With iWMMXt enabled
6826 sizes > 4 also imply an alignment of 8-bytes. */
6827 int fix_size;
6830 struct minipool_fixup
6832 Mfix * next;
6833 rtx insn;
6834 HOST_WIDE_INT address;
6835 rtx * loc;
6836 enum machine_mode mode;
6837 int fix_size;
6838 rtx value;
6839 Mnode * minipool;
6840 HOST_WIDE_INT forwards;
6841 HOST_WIDE_INT backwards;
6844 /* Fixes less than a word need padding out to a word boundary. */
6845 #define MINIPOOL_FIX_SIZE(mode) \
6846 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
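/* For example, MINIPOOL_FIX_SIZE (HImode) and MINIPOOL_FIX_SIZE (SImode)
   are both 4, while MINIPOOL_FIX_SIZE (DImode) and MINIPOOL_FIX_SIZE (DFmode)
   are 8 (and, with iWMMXt, a size of 8 also implies 8-byte alignment of
   the pool entry).  */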
6848 static Mnode * minipool_vector_head;
6849 static Mnode * minipool_vector_tail;
6850 static rtx minipool_vector_label;
6852 /* The linked list of all minipool fixes required for this function. */
6853 Mfix * minipool_fix_head;
6854 Mfix * minipool_fix_tail;
6855 /* The fix entry for the current minipool, once it has been placed. */
6856 Mfix * minipool_barrier;
6858 /* Determines if INSN is the start of a jump table. Returns the end
6859 of the TABLE or NULL_RTX. */
6860 static rtx
6861 is_jump_table (rtx insn)
6863 rtx table;
6865 if (GET_CODE (insn) == JUMP_INSN
6866 && JUMP_LABEL (insn) != NULL
6867 && ((table = next_real_insn (JUMP_LABEL (insn)))
6868 == next_real_insn (insn))
6869 && table != NULL
6870 && GET_CODE (table) == JUMP_INSN
6871 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6872 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6873 return table;
6875 return NULL_RTX;
6878 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6879 #define JUMP_TABLES_IN_TEXT_SECTION 0
6880 #endif
6882 static HOST_WIDE_INT
6883 get_jump_table_size (rtx insn)
6885 /* ADDR_VECs only take room if read-only data goes into the text
6886 section. */
6887 if (JUMP_TABLES_IN_TEXT_SECTION
6888 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6889 || 1
6890 #endif
6893 rtx body = PATTERN (insn);
6894 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6896 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6899 return 0;
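/* For example (illustrative): an SImode ADDR_DIFF_VEC with 10 case labels
   occupies 10 * 4 == 40 bytes when jump tables are placed in the text
   section; if the table is emitted into a read-only data section instead,
   it takes no room in the instruction stream and 0 is returned.  */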
6902 /* Move a minipool fix MP from its current location to before MAX_MP.
6903 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6904 constraints may need updating. */
6905 static Mnode *
6906 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6907 HOST_WIDE_INT max_address)
6909 /* The code below assumes these are different. */
6910 gcc_assert (mp != max_mp);
6912 if (max_mp == NULL)
6914 if (max_address < mp->max_address)
6915 mp->max_address = max_address;
6917 else
6919 if (max_address > max_mp->max_address - mp->fix_size)
6920 mp->max_address = max_mp->max_address - mp->fix_size;
6921 else
6922 mp->max_address = max_address;
6924 /* Unlink MP from its current position. Since max_mp is non-null,
6925 mp->prev must be non-null. */
6926 mp->prev->next = mp->next;
6927 if (mp->next != NULL)
6928 mp->next->prev = mp->prev;
6929 else
6930 minipool_vector_tail = mp->prev;
6932 /* Re-insert it before MAX_MP. */
6933 mp->next = max_mp;
6934 mp->prev = max_mp->prev;
6935 max_mp->prev = mp;
6937 if (mp->prev != NULL)
6938 mp->prev->next = mp;
6939 else
6940 minipool_vector_head = mp;
6943 /* Save the new entry. */
6944 max_mp = mp;
6946 /* Scan over the preceding entries and adjust their addresses as
6947 required. */
6948 while (mp->prev != NULL
6949 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6951 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6952 mp = mp->prev;
6955 return max_mp;
6958 /* Add a constant to the minipool for a forward reference. Returns the
6959 node added or NULL if the constant will not fit in this pool. */
6960 static Mnode *
6961 add_minipool_forward_ref (Mfix *fix)
6963 /* If set, max_mp is the first pool_entry that has a lower
6964 constraint than the one we are trying to add. */
6965 Mnode * max_mp = NULL;
6966 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6967 Mnode * mp;
6969 /* If this fix's address is greater than the address of the first
6970 entry, then we can't put the fix in this pool. We subtract the
6971 size of the current fix to ensure that if the table is fully
6972 packed we still have enough room to insert this value by shuffling
6973 the other fixes forwards. */
6974 if (minipool_vector_head &&
6975 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6976 return NULL;
6978 /* Scan the pool to see if a constant with the same value has
6979 already been added. While we are doing this, also note the
6980 location where we must insert the constant if it doesn't already
6981 exist. */
6982 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6984 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6985 && fix->mode == mp->mode
6986 && (GET_CODE (fix->value) != CODE_LABEL
6987 || (CODE_LABEL_NUMBER (fix->value)
6988 == CODE_LABEL_NUMBER (mp->value)))
6989 && rtx_equal_p (fix->value, mp->value))
6991 /* More than one fix references this entry. */
6992 mp->refcount++;
6993 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6996 /* Note the insertion point if necessary. */
6997 if (max_mp == NULL
6998 && mp->max_address > max_address)
6999 max_mp = mp;
7001 /* If we are inserting an 8-byte aligned quantity and
7002 we have not already found an insertion point, then
7003 make sure that all such 8-byte aligned quantities are
7004 placed at the start of the pool. */
7005 if (ARM_DOUBLEWORD_ALIGN
7006 && max_mp == NULL
7007 && fix->fix_size == 8
7008 && mp->fix_size != 8)
7010 max_mp = mp;
7011 max_address = mp->max_address;
7015 /* The value is not currently in the minipool, so we need to create
7016 a new entry for it. If MAX_MP is NULL, the entry will be put on
7017 the end of the list since the placement is less constrained than
7018 any existing entry. Otherwise, we insert the new fix before
7019 MAX_MP and, if necessary, adjust the constraints on the other
7020 entries. */
7021 mp = xmalloc (sizeof (* mp));
7022 mp->fix_size = fix->fix_size;
7023 mp->mode = fix->mode;
7024 mp->value = fix->value;
7025 mp->refcount = 1;
7026 /* Not yet required for a backwards ref. */
7027 mp->min_address = -65536;
7029 if (max_mp == NULL)
7031 mp->max_address = max_address;
7032 mp->next = NULL;
7033 mp->prev = minipool_vector_tail;
7035 if (mp->prev == NULL)
7037 minipool_vector_head = mp;
7038 minipool_vector_label = gen_label_rtx ();
7040 else
7041 mp->prev->next = mp;
7043 minipool_vector_tail = mp;
7045 else
7047 if (max_address > max_mp->max_address - mp->fix_size)
7048 mp->max_address = max_mp->max_address - mp->fix_size;
7049 else
7050 mp->max_address = max_address;
7052 mp->next = max_mp;
7053 mp->prev = max_mp->prev;
7054 max_mp->prev = mp;
7055 if (mp->prev != NULL)
7056 mp->prev->next = mp;
7057 else
7058 minipool_vector_head = mp;
7061 /* Save the new entry. */
7062 max_mp = mp;
7064 /* Scan over the preceding entries and adjust their addresses as
7065 required. */
7066 while (mp->prev != NULL
7067 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7069 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7070 mp = mp->prev;
7073 return max_mp;
7076 static Mnode *
7077 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7078 HOST_WIDE_INT min_address)
7080 HOST_WIDE_INT offset;
7082 /* The code below assumes these are different. */
7083 gcc_assert (mp != min_mp);
7085 if (min_mp == NULL)
7087 if (min_address > mp->min_address)
7088 mp->min_address = min_address;
7090 else
7092 /* We will adjust this below if it is too loose. */
7093 mp->min_address = min_address;
7095 /* Unlink MP from its current position. Since min_mp is non-null,
7096 mp->next must be non-null. */
7097 mp->next->prev = mp->prev;
7098 if (mp->prev != NULL)
7099 mp->prev->next = mp->next;
7100 else
7101 minipool_vector_head = mp->next;
7103 /* Reinsert it after MIN_MP. */
7104 mp->prev = min_mp;
7105 mp->next = min_mp->next;
7106 min_mp->next = mp;
7107 if (mp->next != NULL)
7108 mp->next->prev = mp;
7109 else
7110 minipool_vector_tail = mp;
7113 min_mp = mp;
7115 offset = 0;
7116 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7118 mp->offset = offset;
7119 if (mp->refcount > 0)
7120 offset += mp->fix_size;
7122 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7123 mp->next->min_address = mp->min_address + mp->fix_size;
7126 return min_mp;
7129 /* Add a constant to the minipool for a backward reference. Returns the
7130 node added or NULL if the constant will not fit in this pool.
7132 Note that the code for insertion for a backwards reference can be
7133 somewhat confusing because the calculated offsets for each fix do
7134 not take into account the size of the pool (which is still under
7135 construction). */
7136 static Mnode *
7137 add_minipool_backward_ref (Mfix *fix)
7139 /* If set, min_mp is the last pool_entry that has a lower constraint
7140 than the one we are trying to add. */
7141 Mnode *min_mp = NULL;
7142 /* This can be negative, since it is only a constraint. */
7143 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7144 Mnode *mp;
7146 /* If we can't reach the current pool from this insn, or if we can't
7147 insert this entry at the end of the pool without pushing other
7148 fixes out of range, then we don't try. This ensures that we
7149 can't fail later on. */
7150 if (min_address >= minipool_barrier->address
7151 || (minipool_vector_tail->min_address + fix->fix_size
7152 >= minipool_barrier->address))
7153 return NULL;
7155 /* Scan the pool to see if a constant with the same value has
7156 already been added. While we are doing this, also note the
7157 location where we must insert the constant if it doesn't already
7158 exist. */
7159 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7161 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7162 && fix->mode == mp->mode
7163 && (GET_CODE (fix->value) != CODE_LABEL
7164 || (CODE_LABEL_NUMBER (fix->value)
7165 == CODE_LABEL_NUMBER (mp->value)))
7166 && rtx_equal_p (fix->value, mp->value)
7167 /* Check that there is enough slack to move this entry to the
7168 end of the table (this is conservative). */
7169 && (mp->max_address
7170 > (minipool_barrier->address
7171 + minipool_vector_tail->offset
7172 + minipool_vector_tail->fix_size)))
7174 mp->refcount++;
7175 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7178 if (min_mp != NULL)
7179 mp->min_address += fix->fix_size;
7180 else
7182 /* Note the insertion point if necessary. */
7183 if (mp->min_address < min_address)
7185 /* For now, we do not allow the insertion of nodes requiring 8-byte
7186 alignment anywhere but at the start of the pool. */
7187 if (ARM_DOUBLEWORD_ALIGN
7188 && fix->fix_size == 8 && mp->fix_size != 8)
7189 return NULL;
7190 else
7191 min_mp = mp;
7193 else if (mp->max_address
7194 < minipool_barrier->address + mp->offset + fix->fix_size)
7196 /* Inserting before this entry would push the fix beyond
7197 its maximum address (which can happen if we have
7198 re-located a forwards fix); force the new fix to come
7199 after it. */
7200 min_mp = mp;
7201 min_address = mp->min_address + fix->fix_size;
7203 /* If we are inserting an 8-byte aligned quantity and
7204 we have not already found an insertion point, then
7205 make sure that all such 8-byte aligned quantities are
7206 placed at the start of the pool. */
7207 else if (ARM_DOUBLEWORD_ALIGN
7208 && min_mp == NULL
7209 && fix->fix_size == 8
7210 && mp->fix_size < 8)
7212 min_mp = mp;
7213 min_address = mp->min_address + fix->fix_size;
7218 /* We need to create a new entry. */
7219 mp = xmalloc (sizeof (* mp));
7220 mp->fix_size = fix->fix_size;
7221 mp->mode = fix->mode;
7222 mp->value = fix->value;
7223 mp->refcount = 1;
7224 mp->max_address = minipool_barrier->address + 65536;
7226 mp->min_address = min_address;
7228 if (min_mp == NULL)
7230 mp->prev = NULL;
7231 mp->next = minipool_vector_head;
7233 if (mp->next == NULL)
7235 minipool_vector_tail = mp;
7236 minipool_vector_label = gen_label_rtx ();
7238 else
7239 mp->next->prev = mp;
7241 minipool_vector_head = mp;
7243 else
7245 mp->next = min_mp->next;
7246 mp->prev = min_mp;
7247 min_mp->next = mp;
7249 if (mp->next != NULL)
7250 mp->next->prev = mp;
7251 else
7252 minipool_vector_tail = mp;
7255 /* Save the new entry. */
7256 min_mp = mp;
7258 if (mp->prev)
7259 mp = mp->prev;
7260 else
7261 mp->offset = 0;
7263 /* Scan over the following entries and adjust their offsets. */
7264 while (mp->next != NULL)
7266 if (mp->next->min_address < mp->min_address + mp->fix_size)
7267 mp->next->min_address = mp->min_address + mp->fix_size;
7269 if (mp->refcount)
7270 mp->next->offset = mp->offset + mp->fix_size;
7271 else
7272 mp->next->offset = mp->offset;
7274 mp = mp->next;
7277 return min_mp;
7280 static void
7281 assign_minipool_offsets (Mfix *barrier)
7283 HOST_WIDE_INT offset = 0;
7284 Mnode *mp;
7286 minipool_barrier = barrier;
7288 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7290 mp->offset = offset;
7292 if (mp->refcount > 0)
7293 offset += mp->fix_size;
7297 /* Output the literal table */
7298 static void
7299 dump_minipool (rtx scan)
7301 Mnode * mp;
7302 Mnode * nmp;
7303 int align64 = 0;
7305 if (ARM_DOUBLEWORD_ALIGN)
7306 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7307 if (mp->refcount > 0 && mp->fix_size == 8)
7309 align64 = 1;
7310 break;
7313 if (dump_file)
7314 fprintf (dump_file,
7315 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7316 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7318 scan = emit_label_after (gen_label_rtx (), scan);
7319 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7320 scan = emit_label_after (minipool_vector_label, scan);
7322 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7324 if (mp->refcount > 0)
7326 if (dump_file)
7328 fprintf (dump_file,
7329 ";; Offset %u, min %ld, max %ld ",
7330 (unsigned) mp->offset, (unsigned long) mp->min_address,
7331 (unsigned long) mp->max_address);
7332 arm_print_value (dump_file, mp->value);
7333 fputc ('\n', dump_file);
7336 switch (mp->fix_size)
7338 #ifdef HAVE_consttable_1
7339 case 1:
7340 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7341 break;
7343 #endif
7344 #ifdef HAVE_consttable_2
7345 case 2:
7346 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7347 break;
7349 #endif
7350 #ifdef HAVE_consttable_4
7351 case 4:
7352 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7353 break;
7355 #endif
7356 #ifdef HAVE_consttable_8
7357 case 8:
7358 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7359 break;
7361 #endif
7362 default:
7363 gcc_unreachable ();
7367 nmp = mp->next;
7368 free (mp);
7371 minipool_vector_head = minipool_vector_tail = NULL;
7372 scan = emit_insn_after (gen_consttable_end (), scan);
7373 scan = emit_barrier_after (scan);
7376 /* Return the cost of forcibly inserting a barrier after INSN. */
7377 static int
7378 arm_barrier_cost (rtx insn)
7380 /* Basing the location of the pool on the loop depth is preferable,
7381 but at the moment, the basic block information seems to be
7382 corrupt by this stage of the compilation. */
7383 int base_cost = 50;
7384 rtx next = next_nonnote_insn (insn);
7386 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7387 base_cost -= 20;
7389 switch (GET_CODE (insn))
7391 case CODE_LABEL:
7392 /* It will always be better to place the table before the label, rather
7393 than after it. */
7394 return 50;
7396 case INSN:
7397 case CALL_INSN:
7398 return base_cost;
7400 case JUMP_INSN:
7401 return base_cost - 10;
7403 default:
7404 return base_cost + 10;
7408 /* Find the best place in the insn stream in the range
7409 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7410 Create the barrier by inserting a jump and add a new fix entry for
7411 it. */
7412 static Mfix *
7413 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7415 HOST_WIDE_INT count = 0;
7416 rtx barrier;
7417 rtx from = fix->insn;
7418 rtx selected = from;
7419 int selected_cost;
7420 HOST_WIDE_INT selected_address;
7421 Mfix * new_fix;
7422 HOST_WIDE_INT max_count = max_address - fix->address;
7423 rtx label = gen_label_rtx ();
7425 selected_cost = arm_barrier_cost (from);
7426 selected_address = fix->address;
7428 while (from && count < max_count)
7430 rtx tmp;
7431 int new_cost;
7433 /* This code shouldn't have been called if there was a natural barrier
7434 within range. */
7435 gcc_assert (GET_CODE (from) != BARRIER);
7437 /* Count the length of this insn. */
7438 count += get_attr_length (from);
7440 /* If there is a jump table, add its length. */
7441 tmp = is_jump_table (from);
7442 if (tmp != NULL)
7444 count += get_jump_table_size (tmp);
7446 /* Jump tables aren't in a basic block, so base the cost on
7447 the dispatch insn. If we select this location, we will
7448 still put the pool after the table. */
7449 new_cost = arm_barrier_cost (from);
7451 if (count < max_count && new_cost <= selected_cost)
7453 selected = tmp;
7454 selected_cost = new_cost;
7455 selected_address = fix->address + count;
7458 /* Continue after the dispatch table. */
7459 from = NEXT_INSN (tmp);
7460 continue;
7463 new_cost = arm_barrier_cost (from);
7465 if (count < max_count && new_cost <= selected_cost)
7467 selected = from;
7468 selected_cost = new_cost;
7469 selected_address = fix->address + count;
7472 from = NEXT_INSN (from);
7475 /* Create a new JUMP_INSN that branches around a barrier. */
7476 from = emit_jump_insn_after (gen_jump (label), selected);
7477 JUMP_LABEL (from) = label;
7478 barrier = emit_barrier_after (from);
7479 emit_label_after (label, barrier);
7481 /* Create a minipool barrier entry for the new barrier. */
7482 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7483 new_fix->insn = barrier;
7484 new_fix->address = selected_address;
7485 new_fix->next = fix->next;
7486 fix->next = new_fix;
7488 return new_fix;
7491 /* Record that there is a natural barrier in the insn stream at
7492 ADDRESS. */
7493 static void
7494 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7496 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7498 fix->insn = insn;
7499 fix->address = address;
7501 fix->next = NULL;
7502 if (minipool_fix_head != NULL)
7503 minipool_fix_tail->next = fix;
7504 else
7505 minipool_fix_head = fix;
7507 minipool_fix_tail = fix;
7510 /* Record INSN, which will need fixing up to load a value from the
7511 minipool. ADDRESS is the offset of the insn since the start of the
7512 function; LOC is a pointer to the part of the insn which requires
7513 fixing; VALUE is the constant that must be loaded, which is of type
7514 MODE. */
7515 static void
7516 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7517 enum machine_mode mode, rtx value)
7519 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7521 #ifdef AOF_ASSEMBLER
7522 /* PIC symbol references need to be converted into offsets into the
7523 based area. */
7524 /* XXX This shouldn't be done here. */
7525 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7526 value = aof_pic_entry (value);
7527 #endif /* AOF_ASSEMBLER */
7529 fix->insn = insn;
7530 fix->address = address;
7531 fix->loc = loc;
7532 fix->mode = mode;
7533 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7534 fix->value = value;
7535 fix->forwards = get_attr_pool_range (insn);
7536 fix->backwards = get_attr_neg_pool_range (insn);
7537 fix->minipool = NULL;
7539 /* If an insn doesn't have a range defined for it, then it isn't
7540 expecting to be reworked by this code. Better to stop now than
7541 to generate duff assembly code. */
7542 gcc_assert (fix->forwards || fix->backwards);
7544 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7545 So there might be an empty word before the start of the pool.
7546 Hence we reduce the forward range by 4 to allow for this
7547 possibility. */
7548 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7549 fix->forwards -= 4;
7551 if (dump_file)
7553 fprintf (dump_file,
7554 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7555 GET_MODE_NAME (mode),
7556 INSN_UID (insn), (unsigned long) address,
7557 -1 * (long)fix->backwards, (long)fix->forwards);
7558 arm_print_value (dump_file, fix->value);
7559 fprintf (dump_file, "\n");
7562 /* Add it to the chain of fixes. */
7563 fix->next = NULL;
7565 if (minipool_fix_head != NULL)
7566 minipool_fix_tail->next = fix;
7567 else
7568 minipool_fix_head = fix;
7570 minipool_fix_tail = fix;
7573 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7574 Returns the number of insns needed, or 99 if we don't know how to
7575 do it. */
7577 arm_const_double_inline_cost (rtx val)
7579 rtx lowpart, highpart;
7580 enum machine_mode mode;
7582 mode = GET_MODE (val);
7584 if (mode == VOIDmode)
7585 mode = DImode;
7587 gcc_assert (GET_MODE_SIZE (mode) == 8);
7589 lowpart = gen_lowpart (SImode, val);
7590 highpart = gen_highpart_mode (SImode, mode, val);
7592 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7593 gcc_assert (GET_CODE (highpart) == CONST_INT);
7595 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7596 NULL_RTX, NULL_RTX, 0, 0)
7597 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7598 NULL_RTX, NULL_RTX, 0, 0));
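/* Worked example (illustrative): for the DImode constant
   0x0000000100000001 both halves are the immediate 1, each of which can
   be synthesized with a single mov, so the reported cost is 1 + 1 == 2
   instructions.  A constant whose halves each need several
   data-processing operations reports a correspondingly higher cost.  */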
7601 /* Return true if it is worthwhile to split a 64-bit constant into two
7602 32-bit operations. This is the case if optimizing for size, or
7603 if we have load delay slots, or if one 32-bit part can be done with
7604 a single data operation. */
7605 bool
7606 arm_const_double_by_parts (rtx val)
7608 enum machine_mode mode = GET_MODE (val);
7609 rtx part;
7611 if (optimize_size || arm_ld_sched)
7612 return true;
7614 if (mode == VOIDmode)
7615 mode = DImode;
7617 part = gen_highpart_mode (SImode, mode, val);
7619 gcc_assert (GET_CODE (part) == CONST_INT);
7621 if (const_ok_for_arm (INTVAL (part))
7622 || const_ok_for_arm (~INTVAL (part)))
7623 return true;
7625 part = gen_lowpart (SImode, val);
7627 gcc_assert (GET_CODE (part) == CONST_INT);
7629 if (const_ok_for_arm (INTVAL (part))
7630 || const_ok_for_arm (~INTVAL (part)))
7631 return true;
7633 return false;
7636 /* Scan INSN and note any of its operands that need fixing.
7637 If DO_PUSHES is false we do not actually push any of the fixups
7638 needed. The function returns TRUE if any fixups were needed/pushed.
7639 This is used by arm_memory_load_p() which needs to know about loads
7640 of constants that will be converted into minipool loads. */
7641 static bool
7642 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7644 bool result = false;
7645 int opno;
7647 extract_insn (insn);
7649 if (!constrain_operands (1))
7650 fatal_insn_not_found (insn);
7652 if (recog_data.n_alternatives == 0)
7653 return false;
7655 /* Fill in recog_op_alt with information about the constraints of
7656 this insn. */
7657 preprocess_constraints ();
7659 for (opno = 0; opno < recog_data.n_operands; opno++)
7661 /* Things we need to fix can only occur in inputs. */
7662 if (recog_data.operand_type[opno] != OP_IN)
7663 continue;
7665 /* If this alternative is a memory reference, then any mention
7666 of constants in this alternative is really to fool reload
7667 into allowing us to accept one there. We need to fix them up
7668 now so that we output the right code. */
7669 if (recog_op_alt[opno][which_alternative].memory_ok)
7671 rtx op = recog_data.operand[opno];
7673 if (CONSTANT_P (op))
7675 if (do_pushes)
7676 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7677 recog_data.operand_mode[opno], op);
7678 result = true;
7680 else if (GET_CODE (op) == MEM
7681 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7682 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7684 if (do_pushes)
7686 rtx cop = avoid_constant_pool_reference (op);
7688 /* Casting the address of something to a mode narrower
7689 than a word can cause avoid_constant_pool_reference()
7690 to return the pool reference itself. That's no good to
7691 us here. Let's just hope that we can use the
7692 constant pool value directly. */
7693 if (op == cop)
7694 cop = get_pool_constant (XEXP (op, 0));
7696 push_minipool_fix (insn, address,
7697 recog_data.operand_loc[opno],
7698 recog_data.operand_mode[opno], cop);
7701 result = true;
7706 return result;
7709 /* GCC puts the pool in the wrong place for ARM, since we can only
7710 load addresses a limited distance around the pc. We do some
7711 special munging to move the constant pool values to the correct
7712 point in the code. */
7713 static void
7714 arm_reorg (void)
7716 rtx insn;
7717 HOST_WIDE_INT address = 0;
7718 Mfix * fix;
7720 minipool_fix_head = minipool_fix_tail = NULL;
7722 /* The first insn must always be a note, or the code below won't
7723 scan it properly. */
7724 insn = get_insns ();
7725 gcc_assert (GET_CODE (insn) == NOTE);
7727 /* Scan all the insns and record the operands that will need fixing. */
7728 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7730 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7731 && (arm_cirrus_insn_p (insn)
7732 || GET_CODE (insn) == JUMP_INSN
7733 || arm_memory_load_p (insn)))
7734 cirrus_reorg (insn);
7736 if (GET_CODE (insn) == BARRIER)
7737 push_minipool_barrier (insn, address);
7738 else if (INSN_P (insn))
7740 rtx table;
7742 note_invalid_constants (insn, address, true);
7743 address += get_attr_length (insn);
7745 /* If the insn is a vector jump, add the size of the table
7746 and skip the table. */
7747 if ((table = is_jump_table (insn)) != NULL)
7749 address += get_jump_table_size (table);
7750 insn = table;
7755 fix = minipool_fix_head;
7757 /* Now scan the fixups and perform the required changes. */
7758 while (fix)
7760 Mfix * ftmp;
7761 Mfix * fdel;
7762 Mfix * last_added_fix;
7763 Mfix * last_barrier = NULL;
7764 Mfix * this_fix;
7766 /* Skip any further barriers before the next fix. */
7767 while (fix && GET_CODE (fix->insn) == BARRIER)
7768 fix = fix->next;
7770 /* No more fixes. */
7771 if (fix == NULL)
7772 break;
7774 last_added_fix = NULL;
7776 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7778 if (GET_CODE (ftmp->insn) == BARRIER)
7780 if (ftmp->address >= minipool_vector_head->max_address)
7781 break;
7783 last_barrier = ftmp;
7785 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7786 break;
7788 last_added_fix = ftmp; /* Keep track of the last fix added. */
7791 /* If we found a barrier, drop back to that; any fixes that we
7792 could have reached but come after the barrier will now go in
7793 the next mini-pool. */
7794 if (last_barrier != NULL)
7796 /* Reduce the refcount for those fixes that won't go into this
7797 pool after all. */
7798 for (fdel = last_barrier->next;
7799 fdel && fdel != ftmp;
7800 fdel = fdel->next)
7802 fdel->minipool->refcount--;
7803 fdel->minipool = NULL;
7806 ftmp = last_barrier;
7808 else
7810 /* ftmp is the first fix that we can't fit into this pool and
7811 there are no natural barriers that we could use. Insert a
7812 new barrier in the code somewhere between the previous
7813 fix and this one, and arrange to jump around it. */
7814 HOST_WIDE_INT max_address;
7816 /* The last item on the list of fixes must be a barrier, so
7817 we can never run off the end of the list of fixes without
7818 last_barrier being set. */
7819 gcc_assert (ftmp);
7821 max_address = minipool_vector_head->max_address;
7822 /* Check that there isn't another fix that is in range that
7823 we couldn't fit into this pool because the pool was
7824 already too large: we need to put the pool before such an
7825 instruction. */
7826 if (ftmp->address < max_address)
7827 max_address = ftmp->address;
7829 last_barrier = create_fix_barrier (last_added_fix, max_address);
7832 assign_minipool_offsets (last_barrier);
7834 while (ftmp)
7836 if (GET_CODE (ftmp->insn) != BARRIER
7837 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7838 == NULL))
7839 break;
7841 ftmp = ftmp->next;
7844 /* Scan over the fixes we have identified for this pool, fixing them
7845 up and adding the constants to the pool itself. */
7846 for (this_fix = fix; this_fix && ftmp != this_fix;
7847 this_fix = this_fix->next)
7848 if (GET_CODE (this_fix->insn) != BARRIER)
7850 rtx addr
7851 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7852 minipool_vector_label),
7853 this_fix->minipool->offset);
7854 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7857 dump_minipool (last_barrier->insn);
7858 fix = ftmp;
7861 /* From now on we must synthesize any constants that we can't handle
7862 directly. This can happen if the RTL gets split during final
7863 instruction generation. */
7864 after_arm_reorg = 1;
7866 /* Free the minipool memory. */
7867 obstack_free (&minipool_obstack, minipool_startobj);
7870 /* Routines to output assembly language. */
7872 /* If the rtx is the correct value then return the string of the number.
7873 In this way we can ensure that valid double constants are generated even
7874 when cross compiling. */
7875 const char *
7876 fp_immediate_constant (rtx x)
7878 REAL_VALUE_TYPE r;
7879 int i;
7881 if (!fp_consts_inited)
7882 init_fp_table ();
7884 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7885 for (i = 0; i < 8; i++)
7886 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7887 return strings_fp[i];
7889 gcc_unreachable ();
7892 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7893 static const char *
7894 fp_const_from_val (REAL_VALUE_TYPE *r)
7896 int i;
7898 if (!fp_consts_inited)
7899 init_fp_table ();
7901 for (i = 0; i < 8; i++)
7902 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7903 return strings_fp[i];
7905 gcc_unreachable ();
7908 /* Output the operands of an LDM/STM instruction to STREAM.
7909 MASK is the ARM register set mask of which only bits 0-15 are important.
7910 REG is the base register, either the frame pointer or the stack pointer;
7911 INSTR is the possibly suffixed load or store instruction. */
7913 static void
7914 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7915 unsigned long mask)
7917 unsigned i;
7918 bool not_first = FALSE;
7920 fputc ('\t', stream);
7921 asm_fprintf (stream, instr, reg);
7922 fputs (", {", stream);
7924 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7925 if (mask & (1 << i))
7927 if (not_first)
7928 fprintf (stream, ", ");
7930 asm_fprintf (stream, "%r", i);
7931 not_first = TRUE;
7934 fprintf (stream, "}\n");
7938 /* Output a FLDMX instruction to STREAM.
7939 BASE is the register containing the address.
7940 REG and COUNT specify the register range.
7941 Extra registers may be added to avoid hardware bugs. */
7943 static void
7944 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7946 int i;
7948 /* Workaround ARM10 VFPr1 bug. */
7949 if (count == 2 && !arm_arch6)
7951 if (reg == 15)
7952 reg--;
7953 count++;
7956 fputc ('\t', stream);
7957 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7959 for (i = reg; i < reg + count; i++)
7961 if (i > reg)
7962 fputs (", ", stream);
7963 asm_fprintf (stream, "d%d", i);
7965 fputs ("}\n", stream);
7970 /* Output the assembly for a store multiple. */
7972 const char *
7973 vfp_output_fstmx (rtx * operands)
7975 char pattern[100];
7976 int p;
7977 int base;
7978 int i;
7980 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7981 p = strlen (pattern);
7983 gcc_assert (GET_CODE (operands[1]) == REG);
7985 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7986 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7988 p += sprintf (&pattern[p], ", d%d", base + i);
7990 strcpy (&pattern[p], "}");
7992 output_asm_insn (pattern, operands);
7993 return "";
7997 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7998 number of bytes pushed. */
8000 static int
8001 vfp_emit_fstmx (int base_reg, int count)
8003 rtx par;
8004 rtx dwarf;
8005 rtx tmp, reg;
8006 int i;
8008 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8009 register pairs are stored by a store multiple insn. We avoid this
8010 by pushing an extra pair. */
8011 if (count == 2 && !arm_arch6)
8013 if (base_reg == LAST_VFP_REGNUM - 3)
8014 base_reg -= 2;
8015 count++;
8018 /* ??? The frame layout is implementation defined. We describe
8019 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8020 We really need some way of representing the whole block so that the
8021 unwinder can figure it out at runtime. */
8022 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8023 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8025 reg = gen_rtx_REG (DFmode, base_reg);
8026 base_reg += 2;
8028 XVECEXP (par, 0, 0)
8029 = gen_rtx_SET (VOIDmode,
8030 gen_rtx_MEM (BLKmode,
8031 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8032 gen_rtx_UNSPEC (BLKmode,
8033 gen_rtvec (1, reg),
8034 UNSPEC_PUSH_MULT));
8036 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8037 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8038 GEN_INT (-(count * 8 + 4))));
8039 RTX_FRAME_RELATED_P (tmp) = 1;
8040 XVECEXP (dwarf, 0, 0) = tmp;
8042 tmp = gen_rtx_SET (VOIDmode,
8043 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8044 reg);
8045 RTX_FRAME_RELATED_P (tmp) = 1;
8046 XVECEXP (dwarf, 0, 1) = tmp;
8048 for (i = 1; i < count; i++)
8050 reg = gen_rtx_REG (DFmode, base_reg);
8051 base_reg += 2;
8052 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8054 tmp = gen_rtx_SET (VOIDmode,
8055 gen_rtx_MEM (DFmode,
8056 gen_rtx_PLUS (SImode,
8057 stack_pointer_rtx,
8058 GEN_INT (i * 8))),
8059 reg);
8060 RTX_FRAME_RELATED_P (tmp) = 1;
8061 XVECEXP (dwarf, 0, i + 1) = tmp;
8064 par = emit_insn (par);
8065 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8066 REG_NOTES (par));
8067 RTX_FRAME_RELATED_P (par) = 1;
8069 return count * 8 + 4;
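/* A worked example (illustrative only): a request to push two D-register
   pairs on a pre-ARMv6 core triggers the workaround above, so three pairs
   are actually stored and the function reports 3 * 8 + 4 = 28 bytes of
   stack used; on ARMv6 or later two pairs are stored and 2 * 8 + 4 = 20
   bytes are reported.  The extra 4 bytes in each case are the unused pad
   word that FSTMX pushes alongside the data.  */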
8073 /* Output a 'call' insn. */
8074 const char *
8075 output_call (rtx *operands)
8077 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8079 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8080 if (REGNO (operands[0]) == LR_REGNUM)
8082 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8083 output_asm_insn ("mov%?\t%0, %|lr", operands);
8086 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8088 if (TARGET_INTERWORK || arm_arch4t)
8089 output_asm_insn ("bx%?\t%0", operands);
8090 else
8091 output_asm_insn ("mov%?\t%|pc, %0", operands);
8093 return "";
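/* For illustration (hypothetical operand): a call through r2 on a target
   without BX and without interworking is emitted as
     mov     lr, pc
     mov     pc, r2
   while with interworking or on ARMv4T the final instruction becomes
     bx      r2
   If the target register is LR itself, it is first copied into IP as shown
   above, since LR is about to be overwritten with the return address.  */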
8096 /* Output a 'call' insn that is a reference in memory. */
8097 const char *
8098 output_call_mem (rtx *operands)
8100 if (TARGET_INTERWORK && !arm_arch5)
8102 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8103 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8104 output_asm_insn ("bx%?\t%|ip", operands);
8106 else if (regno_use_in (LR_REGNUM, operands[0]))
8108 /* LR is used in the memory address. We load the address in the
8109 first instruction. It's safe to use IP as the target of the
8110 load since the call will kill it anyway. */
8111 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8112 if (arm_arch5)
8113 output_asm_insn ("blx%?\t%|ip", operands);
8114 else
8116 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8117 if (arm_arch4t)
8118 output_asm_insn ("bx%?\t%|ip", operands);
8119 else
8120 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8123 else
8125 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8126 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8129 return "";
8133 /* Output a move from arm registers to an fpa register.
8134 OPERANDS[0] is an fpa register.
8135 OPERANDS[1] is the first register of an arm register pair. */
8136 const char *
8137 output_mov_long_double_fpa_from_arm (rtx *operands)
8139 int arm_reg0 = REGNO (operands[1]);
8140 rtx ops[3];
8142 gcc_assert (arm_reg0 != IP_REGNUM);
8144 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8145 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8146 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8148 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8149 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8151 return "";
8154 /* Output a move from an fpa register to arm registers.
8155 OPERANDS[0] is the first register of an arm register pair.
8156 OPERANDS[1] is an fpa register. */
8157 const char *
8158 output_mov_long_double_arm_from_fpa (rtx *operands)
8160 int arm_reg0 = REGNO (operands[0]);
8161 rtx ops[3];
8163 gcc_assert (arm_reg0 != IP_REGNUM);
8165 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8166 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8167 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8169 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8170 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8171 return "";
8174 /* Output a move from arm registers to arm registers of a long double.
8175 OPERANDS[0] is the destination.
8176 OPERANDS[1] is the source. */
8177 const char *
8178 output_mov_long_double_arm_from_arm (rtx *operands)
8180 /* We have to be careful here because the two might overlap. */
8181 int dest_start = REGNO (operands[0]);
8182 int src_start = REGNO (operands[1]);
8183 rtx ops[2];
8184 int i;
8186 if (dest_start < src_start)
8188 for (i = 0; i < 3; i++)
8190 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8191 ops[1] = gen_rtx_REG (SImode, src_start + i);
8192 output_asm_insn ("mov%?\t%0, %1", ops);
8195 else
8197 for (i = 2; i >= 0; i--)
8199 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8200 ops[1] = gen_rtx_REG (SImode, src_start + i);
8201 output_asm_insn ("mov%?\t%0, %1", ops);
8205 return "";
8209 /* Output a move from arm registers to an fpa register.
8210 OPERANDS[0] is an fpa register.
8211 OPERANDS[1] is the first register of an arm register pair. */
8212 const char *
8213 output_mov_double_fpa_from_arm (rtx *operands)
8215 int arm_reg0 = REGNO (operands[1]);
8216 rtx ops[2];
8218 gcc_assert (arm_reg0 != IP_REGNUM);
8220 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8221 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8222 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8223 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8224 return "";
8227 /* Output a move from an fpa register to arm registers.
8228 OPERANDS[0] is the first register of an arm register pair.
8229 OPERANDS[1] is an fpa register. */
8230 const char *
8231 output_mov_double_arm_from_fpa (rtx *operands)
8233 int arm_reg0 = REGNO (operands[0]);
8234 rtx ops[2];
8236 gcc_assert (arm_reg0 != IP_REGNUM);
8238 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8239 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8240 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8241 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8242 return "";
8245 /* Output a move between double words.
8246 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8247 or MEM<-REG and all MEMs must be offsettable addresses. */
8248 const char *
8249 output_move_double (rtx *operands)
8251 enum rtx_code code0 = GET_CODE (operands[0]);
8252 enum rtx_code code1 = GET_CODE (operands[1]);
8253 rtx otherops[3];
8255 if (code0 == REG)
8257 int reg0 = REGNO (operands[0]);
8259 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8261 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
8263 switch (GET_CODE (XEXP (operands[1], 0)))
8265 case REG:
8266 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8267 break;
8269 case PRE_INC:
8270 gcc_assert (TARGET_LDRD);
8271 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8272 break;
8274 case PRE_DEC:
8275 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8276 break;
8278 case POST_INC:
8279 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8280 break;
8282 case POST_DEC:
8283 gcc_assert (TARGET_LDRD);
8284 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8285 break;
8287 case PRE_MODIFY:
8288 case POST_MODIFY:
8289 otherops[0] = operands[0];
8290 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8291 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8293 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8295 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8297 /* Registers overlap so split out the increment. */
8298 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8299 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8301 else
8302 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8304 else
8306 /* We only allow constant increments, so this is safe. */
8307 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8309 break;
8311 case LABEL_REF:
8312 case CONST:
8313 output_asm_insn ("adr%?\t%0, %1", operands);
8314 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8315 break;
8317 default:
8318 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8319 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8321 otherops[0] = operands[0];
8322 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8323 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8325 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8327 if (GET_CODE (otherops[2]) == CONST_INT)
8329 switch ((int) INTVAL (otherops[2]))
8331 case -8:
8332 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8333 return "";
8334 case -4:
8335 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8336 return "";
8337 case 4:
8338 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8339 return "";
8342 if (TARGET_LDRD
8343 && (GET_CODE (otherops[2]) == REG
8344 || (GET_CODE (otherops[2]) == CONST_INT
8345 && INTVAL (otherops[2]) > -256
8346 && INTVAL (otherops[2]) < 256)))
8348 if (reg_overlap_mentioned_p (otherops[0],
8349 otherops[2]))
8351 /* Swap base and index registers over to
8352 avoid a conflict. */
8353 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8354 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8357 /* If both registers conflict, it will usually
8358 have been fixed by a splitter. */
8359 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8361 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8362 output_asm_insn ("ldr%?d\t%0, [%1]",
8363 otherops);
8365 else
8366 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
8367 return "";
8370 if (GET_CODE (otherops[2]) == CONST_INT)
8372 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8373 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8374 else
8375 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8377 else
8378 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8380 else
8381 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8383 return "ldm%?ia\t%0, %M0";
8385 else
8387 otherops[1] = adjust_address (operands[1], SImode, 4);
8388 /* Take care of overlapping base/data reg. */
8389 if (reg_mentioned_p (operands[0], operands[1]))
8391 output_asm_insn ("ldr%?\t%0, %1", otherops);
8392 output_asm_insn ("ldr%?\t%0, %1", operands);
8394 else
8396 output_asm_insn ("ldr%?\t%0, %1", operands);
8397 output_asm_insn ("ldr%?\t%0, %1", otherops);
8402 else
8404 /* Constraints should ensure this. */
8405 gcc_assert (code0 == MEM && code1 == REG);
8406 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
8408 switch (GET_CODE (XEXP (operands[0], 0)))
8410 case REG:
8411 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8412 break;
8414 case PRE_INC:
8415 gcc_assert (TARGET_LDRD);
8416 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8417 break;
8419 case PRE_DEC:
8420 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8421 break;
8423 case POST_INC:
8424 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8425 break;
8427 case POST_DEC:
8428 gcc_assert (TARGET_LDRD);
8429 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8430 break;
8432 case PRE_MODIFY:
8433 case POST_MODIFY:
8434 otherops[0] = operands[1];
8435 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8436 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8438 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8439 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8440 else
8441 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8442 break;
8444 case PLUS:
8445 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8446 if (GET_CODE (otherops[2]) == CONST_INT)
8448 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8450 case -8:
8451 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8452 return "";
8454 case -4:
8455 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8456 return "";
8458 case 4:
8459 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8460 return "";
8463 if (TARGET_LDRD
8464 && (GET_CODE (otherops[2]) == REG
8465 || (GET_CODE (otherops[2]) == CONST_INT
8466 && INTVAL (otherops[2]) > -256
8467 && INTVAL (otherops[2]) < 256)))
8469 otherops[0] = operands[1];
8470 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8471 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8472 return "";
8474 /* Fall through */
8476 default:
8477 otherops[0] = adjust_address (operands[0], SImode, 4);
8478 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8479 output_asm_insn ("str%?\t%1, %0", operands);
8480 output_asm_insn ("str%?\t%1, %0", otherops);
8484 return "";
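/* Illustrative example (hypothetical registers): for a pre-modify load of
   {r0, r1} from [r2, r1]! the increment register r1 is also part of the
   destination pair, so the increment is split out as shown above:
     add     r2, r2, r1
     ldrd    r0, [r2]    @split
   When there is no overlap a single "ldrd r0, [r2, r1]!" is emitted
   instead.  */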
8487 /* Output an ADD r, s, #n where n may be too big for one instruction.
8488 If adding zero to one register, output nothing. */
8489 const char *
8490 output_add_immediate (rtx *operands)
8492 HOST_WIDE_INT n = INTVAL (operands[2]);
8494 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8496 if (n < 0)
8497 output_multi_immediate (operands,
8498 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8499 -n);
8500 else
8501 output_multi_immediate (operands,
8502 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8506 return "";
8509 /* Output a multiple immediate operation.
8510 OPERANDS is the vector of operands referred to in the output patterns.
8511 INSTR1 is the output pattern to use for the first constant.
8512 INSTR2 is the output pattern to use for subsequent constants.
8513 IMMED_OP is the index of the constant slot in OPERANDS.
8514 N is the constant value. */
8515 static const char *
8516 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8517 int immed_op, HOST_WIDE_INT n)
8519 #if HOST_BITS_PER_WIDE_INT > 32
8520 n &= 0xffffffff;
8521 #endif
8523 if (n == 0)
8525 /* Quick and easy output. */
8526 operands[immed_op] = const0_rtx;
8527 output_asm_insn (instr1, operands);
8529 else
8531 int i;
8532 const char * instr = instr1;
8534 /* Note that n is never zero here (which would give no output). */
8535 for (i = 0; i < 32; i += 2)
8537 if (n & (3 << i))
8539 operands[immed_op] = GEN_INT (n & (255 << i));
8540 output_asm_insn (instr, operands);
8541 instr = instr2;
8542 i += 6;
8547 return "";
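/* A worked example (value chosen for illustration, registers hypothetical):
   an addition of 0x00ff00ff cannot be encoded as a single ARM immediate, so
   output_add_immediate splits it into two 8-bit chunks on even bit
   positions, roughly
     add     r0, r1, #255            @ 0x000000ff
     add     r0, r0, #16711680       @ 0x00ff0000
   the first instruction using INSTR1 and the second INSTR2, exactly as the
   loop above selects chunks of the form (255 << i).  */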
8550 /* Return the appropriate ARM instruction for the operation code.
8551 The returned result should not be overwritten. OP is the rtx of the
8552 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8553 was shifted. */
8554 const char *
8555 arithmetic_instr (rtx op, int shift_first_arg)
8557 switch (GET_CODE (op))
8559 case PLUS:
8560 return "add";
8562 case MINUS:
8563 return shift_first_arg ? "rsb" : "sub";
8565 case IOR:
8566 return "orr";
8568 case XOR:
8569 return "eor";
8571 case AND:
8572 return "and";
8574 default:
8575 gcc_unreachable ();
8579 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8580 for the operation code. The returned result should not be overwritten.
8581 OP is the rtx code of the shift.
8582 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8583 constant shift amount otherwise. */
8584 static const char *
8585 shift_op (rtx op, HOST_WIDE_INT *amountp)
8587 const char * mnem;
8588 enum rtx_code code = GET_CODE (op);
8590 switch (GET_CODE (XEXP (op, 1)))
8592 case REG:
8593 case SUBREG:
8594 *amountp = -1;
8595 break;
8597 case CONST_INT:
8598 *amountp = INTVAL (XEXP (op, 1));
8599 break;
8601 default:
8602 gcc_unreachable ();
8605 switch (code)
8607 case ASHIFT:
8608 mnem = "asl";
8609 break;
8611 case ASHIFTRT:
8612 mnem = "asr";
8613 break;
8615 case LSHIFTRT:
8616 mnem = "lsr";
8617 break;
8619 case ROTATE:
8620 gcc_assert (*amountp != -1);
8621 *amountp = 32 - *amountp;
8623 /* Fall through. */
8625 case ROTATERT:
8626 mnem = "ror";
8627 break;
8629 case MULT:
8630 /* We never have to worry about the amount being other than a
8631 power of 2, since this case can never be reloaded from a reg. */
8632 gcc_assert (*amountp != -1);
8633 *amountp = int_log2 (*amountp);
8634 return "asl";
8636 default:
8637 gcc_unreachable ();
8640 if (*amountp != -1)
8642 /* This is not 100% correct, but follows from the desire to merge
8643 multiplication by a power of 2 with the recognizer for a
8644 shift. >=32 is not a valid shift for "asl", so we must try and
8645 output a shift that produces the correct arithmetical result.
8646 Using lsr #32 is identical except for the fact that the carry bit
8647 is not set correctly if we set the flags; but we never use the
8648 carry bit from such an operation, so we can ignore that. */
8649 if (code == ROTATERT)
8650 /* Rotate is just modulo 32. */
8651 *amountp &= 31;
8652 else if (*amountp != (*amountp & 31))
8654 if (code == ASHIFT)
8655 mnem = "lsr";
8656 *amountp = 32;
8659 /* Shifts of 0 are no-ops. */
8660 if (*amountp == 0)
8661 return NULL;
8664 return mnem;
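/* Some illustrative mappings produced by the code above (operands are
   hypothetical):
     (mult x 8)       -> "asl", *AMOUNTP = 3   (log2 of the multiplier)
     (rotate x 8)     -> "ror", *AMOUNTP = 24  (32 - 8)
     (ashift x 33)    -> "lsr", *AMOUNTP = 32  (an out-of-range left shift
                          degenerates to a zeroing right shift)
     (ashiftrt x rN)  -> "asr", *AMOUNTP = -1  (shift by a register)
     (lshiftrt x 0)   -> NULL, since a shift by zero is a no-op.  */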
8667 /* Obtain the shift count (log2) of POWER, which must be a power of two.
8669 static HOST_WIDE_INT
8670 int_log2 (HOST_WIDE_INT power)
8672 HOST_WIDE_INT shift = 0;
8674 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8676 gcc_assert (shift <= 31);
8677 shift++;
8680 return shift;
8683 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8684 because /bin/as is horribly restrictive. The judgement about
8685 whether or not each character is 'printable' (and can be output as
8686 is) or not (and must be printed with an octal escape) must be made
8687 with reference to the *host* character set -- the situation is
8688 similar to that discussed in the comments above pp_c_char in
8689 c-pretty-print.c. */
8691 #define MAX_ASCII_LEN 51
8693 void
8694 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8696 int i;
8697 int len_so_far = 0;
8699 fputs ("\t.ascii\t\"", stream);
8701 for (i = 0; i < len; i++)
8703 int c = p[i];
8705 if (len_so_far >= MAX_ASCII_LEN)
8707 fputs ("\"\n\t.ascii\t\"", stream);
8708 len_so_far = 0;
8711 if (ISPRINT (c))
8713 if (c == '\\' || c == '\"')
8715 putc ('\\', stream);
8716 len_so_far++;
8718 putc (c, stream);
8719 len_so_far++;
8721 else
8723 fprintf (stream, "\\%03o", c);
8724 len_so_far += 4;
8728 fputs ("\"\n", stream);
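/* Example output (input bytes chosen for illustration): for the three bytes
   'a', '"' and '\n' the routine emits
     .ascii  "a\"\012"
   the quote is backslash-escaped and the non-printable newline is written as
   a three-digit octal escape.  Strings longer than MAX_ASCII_LEN characters
   of output are split across several .ascii directives.  */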
8731 /* Compute the register save mask for registers 0 through 12
8732 inclusive. This code is used by arm_compute_save_reg_mask. */
8734 static unsigned long
8735 arm_compute_save_reg0_reg12_mask (void)
8737 unsigned long func_type = arm_current_func_type ();
8738 unsigned long save_reg_mask = 0;
8739 unsigned int reg;
8741 if (IS_INTERRUPT (func_type))
8743 unsigned int max_reg;
8744 /* Interrupt functions must not corrupt any registers,
8745 even call clobbered ones. If this is a leaf function
8746 we can just examine the registers used by the RTL, but
8747 otherwise we have to assume that whatever function is
8748 called might clobber anything, and so we have to save
8749 all the call-clobbered registers as well. */
8750 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8751 /* FIQ handlers have registers r8 - r12 banked, so
8752 we only need to check r0 - r7.  Normal ISRs only
8753 bank r14 and r15, so we must check up to r12.
8754 r13 is the stack pointer which is always preserved,
8755 so we do not need to consider it here. */
8756 max_reg = 7;
8757 else
8758 max_reg = 12;
8760 for (reg = 0; reg <= max_reg; reg++)
8761 if (regs_ever_live[reg]
8762 || (! current_function_is_leaf && call_used_regs [reg]))
8763 save_reg_mask |= (1 << reg);
8765 /* Also save the pic base register if necessary. */
8766 if (flag_pic
8767 && !TARGET_SINGLE_PIC_BASE
8768 && current_function_uses_pic_offset_table)
8769 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8771 else
8773 /* In the normal case we only need to save those registers
8774 which are call saved and which are used by this function. */
8775 for (reg = 0; reg <= 10; reg++)
8776 if (regs_ever_live[reg] && ! call_used_regs [reg])
8777 save_reg_mask |= (1 << reg);
8779 /* Handle the frame pointer as a special case. */
8780 if (! TARGET_APCS_FRAME
8781 && ! frame_pointer_needed
8782 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8783 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8784 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8786 /* If we aren't loading the PIC register,
8787 don't stack it even though it may be live. */
8788 if (flag_pic
8789 && !TARGET_SINGLE_PIC_BASE
8790 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8791 || current_function_uses_pic_offset_table))
8792 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8795 /* Save registers so the exception handler can modify them. */
8796 if (current_function_calls_eh_return)
8798 unsigned int i;
8800 for (i = 0; ; i++)
8802 reg = EH_RETURN_DATA_REGNO (i);
8803 if (reg == INVALID_REGNUM)
8804 break;
8805 save_reg_mask |= 1 << reg;
8809 return save_reg_mask;
8812 /* Compute a bit mask of which registers need to be
8813 saved on the stack for the current function. */
8815 static unsigned long
8816 arm_compute_save_reg_mask (void)
8818 unsigned int save_reg_mask = 0;
8819 unsigned long func_type = arm_current_func_type ();
8821 if (IS_NAKED (func_type))
8822 /* This should never really happen. */
8823 return 0;
8825 /* If we are creating a stack frame, then we must save the frame pointer,
8826 IP (which will hold the old stack pointer), LR and the PC. */
8827 if (frame_pointer_needed)
8828 save_reg_mask |=
8829 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8830 | (1 << IP_REGNUM)
8831 | (1 << LR_REGNUM)
8832 | (1 << PC_REGNUM);
8834 /* Volatile functions do not return, so there
8835 is no need to save any other registers. */
8836 if (IS_VOLATILE (func_type))
8837 return save_reg_mask;
8839 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8841 /* Decide if we need to save the link register.
8842 Interrupt routines have their own banked link register,
8843 so they never need to save it.
8844 Otherwise if we do not use the link register we do not need to save
8845 it. If we are pushing other registers onto the stack however, we
8846 can save an instruction in the epilogue by pushing the link register
8847 now and then popping it back into the PC. This incurs extra memory
8848 accesses though, so we only do it when optimizing for size, and only
8849 if we know that we will not need a fancy return sequence. */
8850 if (regs_ever_live [LR_REGNUM]
8851 || (save_reg_mask
8852 && optimize_size
8853 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8854 && !current_function_calls_eh_return))
8855 save_reg_mask |= 1 << LR_REGNUM;
8857 if (cfun->machine->lr_save_eliminated)
8858 save_reg_mask &= ~ (1 << LR_REGNUM);
8860 if (TARGET_REALLY_IWMMXT
8861 && ((bit_count (save_reg_mask)
8862 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8864 unsigned int reg;
8866 /* The total number of registers that are going to be pushed
8867 onto the stack is odd. We need to ensure that the stack
8868 is 64-bit aligned before we start to save iWMMXt registers,
8869 and also before we start to create locals. (A local variable
8870 might be a double or long long which we will load/store using
8871 an iWMMXt instruction). Therefore we need to push another
8872 ARM register, so that the stack will be 64-bit aligned. We
8873 try to avoid using the arg registers (r0 - r3) as they might be
8874 used to pass values in a tail call. */
8875 for (reg = 4; reg <= 12; reg++)
8876 if ((save_reg_mask & (1 << reg)) == 0)
8877 break;
8879 if (reg <= 12)
8880 save_reg_mask |= (1 << reg);
8881 else
8883 cfun->machine->sibcall_blocked = 1;
8884 save_reg_mask |= (1 << 3);
8888 return save_reg_mask;
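/* Illustrative example (hypothetical function): with a frame pointer, a call
   to another function and r4 live across that call, the returned mask
   contains r4, fp, ip, lr and pc, i.e.
     (1 << 4) | (1 << 11) | (1 << 12) | (1 << 14) | (1 << 15)
   On an iWMMXt target that five-register push would leave the stack only
   word aligned, so one more register (r5 in this case, the first free one in
   r4-r12) would be added to restore 64-bit alignment, as described above.  */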
8892 /* Compute a bit mask of which registers need to be
8893 saved on the stack for the current function. */
8894 static unsigned long
8895 thumb_compute_save_reg_mask (void)
8897 unsigned long mask;
8898 unsigned reg;
8900 mask = 0;
8901 for (reg = 0; reg < 12; reg ++)
8902 if (regs_ever_live[reg] && !call_used_regs[reg])
8903 mask |= 1 << reg;
8905 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8906 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8908 if (TARGET_SINGLE_PIC_BASE)
8909 mask &= ~(1 << arm_pic_register);
8911 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8912 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8913 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8915 /* LR will also be pushed if any lo regs are pushed. */
8916 if (mask & 0xff || thumb_force_lr_save ())
8917 mask |= (1 << LR_REGNUM);
8919 /* Make sure we have a low work register if we need one.
8920 We will need one if we are going to push a high register,
8921 but we are not currently intending to push a low register. */
8922 if ((mask & 0xff) == 0
8923 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8925 /* Use thumb_find_work_register to choose which register
8926 we will use. If the register is live then we will
8927 have to push it. Use LAST_LO_REGNUM as our fallback
8928 choice for the register to select. */
8929 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8931 if (! call_used_regs[reg])
8932 mask |= 1 << reg;
8935 return mask;
8939 /* Return the number of bytes required to save VFP registers. */
8940 static int
8941 arm_get_vfp_saved_size (void)
8943 unsigned int regno;
8944 int count;
8945 int saved;
8947 saved = 0;
8948 /* Space for saved VFP registers. */
8949 if (TARGET_HARD_FLOAT && TARGET_VFP)
8951 count = 0;
8952 for (regno = FIRST_VFP_REGNUM;
8953 regno < LAST_VFP_REGNUM;
8954 regno += 2)
8956 if ((!regs_ever_live[regno] || call_used_regs[regno])
8957 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8959 if (count > 0)
8961 /* Workaround ARM10 VFPr1 bug. */
8962 if (count == 2 && !arm_arch6)
8963 count++;
8964 saved += count * 8 + 4;
8966 count = 0;
8968 else
8969 count++;
8971 if (count > 0)
8973 if (count == 2 && !arm_arch6)
8974 count++;
8975 saved += count * 8 + 4;
8978 return saved;
8982 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8983 everything bar the final return instruction. */
8984 const char *
8985 output_return_instruction (rtx operand, int really_return, int reverse)
8987 char conditional[10];
8988 char instr[100];
8989 unsigned reg;
8990 unsigned long live_regs_mask;
8991 unsigned long func_type;
8992 arm_stack_offsets *offsets;
8994 func_type = arm_current_func_type ();
8996 if (IS_NAKED (func_type))
8997 return "";
8999 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9001 /* If this function was declared non-returning, and we have
9002 found a tail call, then we have to trust that the called
9003 function won't return. */
9004 if (really_return)
9006 rtx ops[2];
9008 /* Otherwise, trap an attempted return by aborting. */
9009 ops[0] = operand;
9010 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9011 : "abort");
9012 assemble_external_libcall (ops[1]);
9013 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9016 return "";
9019 gcc_assert (!current_function_calls_alloca || really_return);
9021 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9023 return_used_this_function = 1;
9025 live_regs_mask = arm_compute_save_reg_mask ();
9027 if (live_regs_mask)
9029 const char * return_reg;
9031 /* If we do not have any special requirements for function exit
9032 (e.g. interworking, or ISR) then we can load the return address
9033 directly into the PC. Otherwise we must load it into LR. */
9034 if (really_return
9035 && ! TARGET_INTERWORK)
9036 return_reg = reg_names[PC_REGNUM];
9037 else
9038 return_reg = reg_names[LR_REGNUM];
9040 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9042 /* There are three possible reasons for the IP register
9043 being saved. 1) a stack frame was created, in which case
9044 IP contains the old stack pointer, or 2) an ISR routine
9045 corrupted it, or 3) it was saved to align the stack on
9046 iWMMXt. In case 1, restore IP into SP, otherwise just
9047 restore IP. */
9048 if (frame_pointer_needed)
9050 live_regs_mask &= ~ (1 << IP_REGNUM);
9051 live_regs_mask |= (1 << SP_REGNUM);
9053 else
9054 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
9057 /* On some ARM architectures it is faster to use LDR rather than
9058 LDM to load a single register. On other architectures, the
9059 cost is the same. In 26 bit mode, or for exception handlers,
9060 we have to use LDM to load the PC so that the CPSR is also
9061 restored. */
9062 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9063 if (live_regs_mask == (1U << reg))
9064 break;
9066 if (reg <= LAST_ARM_REGNUM
9067 && (reg != LR_REGNUM
9068 || ! really_return
9069 || ! IS_INTERRUPT (func_type)))
9071 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9072 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9074 else
9076 char *p;
9077 int first = 1;
9079 /* Generate the load multiple instruction to restore the
9080 registers. Note we can get here, even if
9081 frame_pointer_needed is true, but only if sp already
9082 points to the base of the saved core registers. */
9083 if (live_regs_mask & (1 << SP_REGNUM))
9085 unsigned HOST_WIDE_INT stack_adjust;
9087 offsets = arm_get_frame_offsets ();
9088 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9089 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
9091 if (stack_adjust && arm_arch5)
9092 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9093 else
9095 /* If we can't use ldmib (SA110 bug),
9096 then try to pop r3 instead. */
9097 if (stack_adjust)
9098 live_regs_mask |= 1 << 3;
9099 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9102 else
9103 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9105 p = instr + strlen (instr);
9107 for (reg = 0; reg <= SP_REGNUM; reg++)
9108 if (live_regs_mask & (1 << reg))
9110 int l = strlen (reg_names[reg]);
9112 if (first)
9113 first = 0;
9114 else
9116 memcpy (p, ", ", 2);
9117 p += 2;
9120 memcpy (p, "%|", 2);
9121 memcpy (p + 2, reg_names[reg], l);
9122 p += l + 2;
9125 if (live_regs_mask & (1 << LR_REGNUM))
9127 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9128 /* If returning from an interrupt, restore the CPSR. */
9129 if (IS_INTERRUPT (func_type))
9130 strcat (p, "^");
9132 else
9133 strcpy (p, "}");
9136 output_asm_insn (instr, & operand);
9138 /* See if we need to generate an extra instruction to
9139 perform the actual function return. */
9140 if (really_return
9141 && func_type != ARM_FT_INTERWORKED
9142 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9144 /* The return has already been handled
9145 by loading the LR into the PC. */
9146 really_return = 0;
9150 if (really_return)
9152 switch ((int) ARM_FUNC_TYPE (func_type))
9154 case ARM_FT_ISR:
9155 case ARM_FT_FIQ:
9156 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9157 break;
9159 case ARM_FT_INTERWORKED:
9160 sprintf (instr, "bx%s\t%%|lr", conditional);
9161 break;
9163 case ARM_FT_EXCEPTION:
9164 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9165 break;
9167 default:
9168 /* Use bx if it's available. */
9169 if (arm_arch5 || arm_arch4t)
9170 sprintf (instr, "bx%s\t%%|lr", conditional);
9171 else
9172 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9173 break;
9176 output_asm_insn (instr, & operand);
9179 return "";
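/* Illustrative epilogue forms produced above (register sets are
   hypothetical): if only LR was saved and no interworking is needed the
   return collapses to
     ldr     pc, [sp], #4
   if {r4, r5, lr} were saved it becomes
     ldmfd   sp!, {r4, r5, pc}
   and an interworking return instead reloads LR and finishes with
     bx      lr  */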
9182 /* Write the function name into the code section, directly preceding
9183 the function prologue.
9185 Code will be output similar to this:
9187 .ascii "arm_poke_function_name", 0
9188 .align
9190 .word 0xff000000 + (t1 - t0)
9191 arm_poke_function_name
9192 mov ip, sp
9193 stmfd sp!, {fp, ip, lr, pc}
9194 sub fp, ip, #4
9196 When performing a stack backtrace, code can inspect the value
9197 of 'pc' stored at 'fp' + 0. If the trace function then looks
9198 at location pc - 12 and the top 8 bits are set, then we know
9199 that there is a function name embedded immediately preceding this
9200 location, whose length is given by (pc[-3] & ~0xff000000).
9202 We assume that pc is declared as a pointer to an unsigned long.
9204 It is of no benefit to output the function name if we are assembling
9205 a leaf function. These function types will not contain a stack
9206 backtrace structure, therefore it is not possible to determine the
9207 function name. */
9208 void
9209 arm_poke_function_name (FILE *stream, const char *name)
9211 unsigned long alignlength;
9212 unsigned long length;
9213 rtx x;
9215 length = strlen (name) + 1;
9216 alignlength = ROUND_UP_WORD (length);
9218 ASM_OUTPUT_ASCII (stream, name, length);
9219 ASM_OUTPUT_ALIGN (stream, 2);
9220 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9221 assemble_aligned_integer (UNITS_PER_WORD, x);
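/* A worked example (hypothetical function name): for a function named
   "fact", LENGTH is strlen ("fact") + 1 = 5 and ALIGNLENGTH rounds that up
   to 8, so the marker word emitted after the name and the alignment is
   0xff000000 + 8 = 0xff000008.  A backtrace routine reading pc[-3] at the
   probe point described above sees the top byte set and can recover the
   8-byte name field immediately before it.  */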
9224 /* Place some comments into the assembler stream
9225 describing the current function. */
9226 static void
9227 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9229 unsigned long func_type;
9231 if (!TARGET_ARM)
9233 thumb_output_function_prologue (f, frame_size);
9234 return;
9237 /* Sanity check. */
9238 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
9240 func_type = arm_current_func_type ();
9242 switch ((int) ARM_FUNC_TYPE (func_type))
9244 default:
9245 case ARM_FT_NORMAL:
9246 break;
9247 case ARM_FT_INTERWORKED:
9248 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9249 break;
9250 case ARM_FT_ISR:
9251 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9252 break;
9253 case ARM_FT_FIQ:
9254 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9255 break;
9256 case ARM_FT_EXCEPTION:
9257 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9258 break;
9261 if (IS_NAKED (func_type))
9262 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9264 if (IS_VOLATILE (func_type))
9265 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9267 if (IS_NESTED (func_type))
9268 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9270 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9271 current_function_args_size,
9272 current_function_pretend_args_size, frame_size);
9274 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9275 frame_pointer_needed,
9276 cfun->machine->uses_anonymous_args);
9278 if (cfun->machine->lr_save_eliminated)
9279 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9281 if (current_function_calls_eh_return)
9282 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9284 #ifdef AOF_ASSEMBLER
9285 if (flag_pic)
9286 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9287 #endif
9289 return_used_this_function = 0;
9292 const char *
9293 arm_output_epilogue (rtx sibling)
9295 int reg;
9296 unsigned long saved_regs_mask;
9297 unsigned long func_type;
9298 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9299 frame that is $fp + 4 for a non-variadic function. */
9300 int floats_offset = 0;
9301 rtx operands[3];
9302 FILE * f = asm_out_file;
9303 unsigned int lrm_count = 0;
9304 int really_return = (sibling == NULL);
9305 int start_reg;
9306 arm_stack_offsets *offsets;
9308 /* If we have already generated the return instruction
9309 then it is futile to generate anything else. */
9310 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9311 return "";
9313 func_type = arm_current_func_type ();
9315 if (IS_NAKED (func_type))
9316 /* Naked functions don't have epilogues. */
9317 return "";
9319 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9321 rtx op;
9323 /* A volatile function should never return. Call abort. */
9324 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9325 assemble_external_libcall (op);
9326 output_asm_insn ("bl\t%a0", &op);
9328 return "";
9331 /* If we are throwing an exception, then we really must be doing a
9332 return, so we can't tail-call. */
9333 gcc_assert (!current_function_calls_eh_return || really_return);
9335 offsets = arm_get_frame_offsets ();
9336 saved_regs_mask = arm_compute_save_reg_mask ();
9338 if (TARGET_IWMMXT)
9339 lrm_count = bit_count (saved_regs_mask);
9341 floats_offset = offsets->saved_args;
9342 /* Compute how far away the floats will be. */
9343 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9344 if (saved_regs_mask & (1 << reg))
9345 floats_offset += 4;
9347 if (frame_pointer_needed)
9349 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9350 int vfp_offset = offsets->frame;
9352 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9354 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9355 if (regs_ever_live[reg] && !call_used_regs[reg])
9357 floats_offset += 12;
9358 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9359 reg, FP_REGNUM, floats_offset - vfp_offset);
9362 else
9364 start_reg = LAST_FPA_REGNUM;
9366 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9368 if (regs_ever_live[reg] && !call_used_regs[reg])
9370 floats_offset += 12;
9372 /* We can't unstack more than four registers at once. */
9373 if (start_reg - reg == 3)
9375 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9376 reg, FP_REGNUM, floats_offset - vfp_offset);
9377 start_reg = reg - 1;
9380 else
9382 if (reg != start_reg)
9383 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9384 reg + 1, start_reg - reg,
9385 FP_REGNUM, floats_offset - vfp_offset);
9386 start_reg = reg - 1;
9390 /* Just in case the last register checked also needs unstacking. */
9391 if (reg != start_reg)
9392 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9393 reg + 1, start_reg - reg,
9394 FP_REGNUM, floats_offset - vfp_offset);
9397 if (TARGET_HARD_FLOAT && TARGET_VFP)
9399 int saved_size;
9401 /* The fldmx insn does not have base+offset addressing modes,
9402 so we use IP to hold the address. */
9403 saved_size = arm_get_vfp_saved_size ();
9405 if (saved_size > 0)
9407 floats_offset += saved_size;
9408 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9409 FP_REGNUM, floats_offset - vfp_offset);
9411 start_reg = FIRST_VFP_REGNUM;
9412 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9414 if ((!regs_ever_live[reg] || call_used_regs[reg])
9415 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9417 if (start_reg != reg)
9418 arm_output_fldmx (f, IP_REGNUM,
9419 (start_reg - FIRST_VFP_REGNUM) / 2,
9420 (reg - start_reg) / 2);
9421 start_reg = reg + 2;
9424 if (start_reg != reg)
9425 arm_output_fldmx (f, IP_REGNUM,
9426 (start_reg - FIRST_VFP_REGNUM) / 2,
9427 (reg - start_reg) / 2);
9430 if (TARGET_IWMMXT)
9432 /* The frame pointer is guaranteed to be non-double-word aligned.
9433 This is because it is set to (old_stack_pointer - 4) and the
9434 old_stack_pointer was double word aligned. Thus the offset to
9435 the iWMMXt registers to be loaded must also be non-double-word
9436 sized, so that the resultant address *is* double-word aligned.
9437 We can ignore floats_offset since that was already included in
9438 the live_regs_mask. */
9439 lrm_count += (lrm_count % 2 ? 2 : 1);
9441 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9442 if (regs_ever_live[reg] && !call_used_regs[reg])
9444 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9445 reg, FP_REGNUM, lrm_count * 4);
9446 lrm_count += 2;
9450 /* saved_regs_mask should contain the IP, which at the time of stack
9451 frame generation actually contains the old stack pointer. So a
9452 quick way to unwind the stack is just pop the IP register directly
9453 into the stack pointer. */
9454 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
9455 saved_regs_mask &= ~ (1 << IP_REGNUM);
9456 saved_regs_mask |= (1 << SP_REGNUM);
9458 /* There are two registers left in saved_regs_mask - LR and PC. We
9459 only need to restore the LR register (the return address), but to
9460 save time we can load it directly into the PC, unless we need a
9461 special function exit sequence, or we are not really returning. */
9462 if (really_return
9463 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9464 && !current_function_calls_eh_return)
9465 /* Delete the LR from the register mask, so that the LR on
9466 the stack is loaded into the PC in the register mask. */
9467 saved_regs_mask &= ~ (1 << LR_REGNUM);
9468 else
9469 saved_regs_mask &= ~ (1 << PC_REGNUM);
9471 /* We must use SP as the base register, because SP is one of the
9472 registers being restored. If an interrupt or page fault
9473 happens in the ldm instruction, the SP might or might not
9474 have been restored. That would be bad, as then SP will no
9475 longer indicate the safe area of stack, and we can get stack
9476 corruption. Using SP as the base register means that it will
9477 be reset correctly to the original value, should an interrupt
9478 occur. If the stack pointer already points at the right
9479 place, then omit the subtraction. */
9480 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9481 || current_function_calls_alloca)
9482 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9483 4 * bit_count (saved_regs_mask));
9484 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9486 if (IS_INTERRUPT (func_type))
9487 /* Interrupt handlers will have pushed the
9488 IP onto the stack, so restore it now. */
9489 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9491 else
9493 /* Restore stack pointer if necessary. */
9494 if (offsets->outgoing_args != offsets->saved_regs)
9496 operands[0] = operands[1] = stack_pointer_rtx;
9497 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9498 output_add_immediate (operands);
9501 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9503 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9504 if (regs_ever_live[reg] && !call_used_regs[reg])
9505 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9506 reg, SP_REGNUM);
9508 else
9510 start_reg = FIRST_FPA_REGNUM;
9512 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9514 if (regs_ever_live[reg] && !call_used_regs[reg])
9516 if (reg - start_reg == 3)
9518 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9519 start_reg, SP_REGNUM);
9520 start_reg = reg + 1;
9523 else
9525 if (reg != start_reg)
9526 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9527 start_reg, reg - start_reg,
9528 SP_REGNUM);
9530 start_reg = reg + 1;
9534 /* Just in case the last register checked also needs unstacking. */
9535 if (reg != start_reg)
9536 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9537 start_reg, reg - start_reg, SP_REGNUM);
9540 if (TARGET_HARD_FLOAT && TARGET_VFP)
9542 start_reg = FIRST_VFP_REGNUM;
9543 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9545 if ((!regs_ever_live[reg] || call_used_regs[reg])
9546 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9548 if (start_reg != reg)
9549 arm_output_fldmx (f, SP_REGNUM,
9550 (start_reg - FIRST_VFP_REGNUM) / 2,
9551 (reg - start_reg) / 2);
9552 start_reg = reg + 2;
9555 if (start_reg != reg)
9556 arm_output_fldmx (f, SP_REGNUM,
9557 (start_reg - FIRST_VFP_REGNUM) / 2,
9558 (reg - start_reg) / 2);
9560 if (TARGET_IWMMXT)
9561 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9562 if (regs_ever_live[reg] && !call_used_regs[reg])
9563 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9565 /* If we can, restore the LR into the PC. */
9566 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9567 && really_return
9568 && current_function_pretend_args_size == 0
9569 && saved_regs_mask & (1 << LR_REGNUM)
9570 && !current_function_calls_eh_return)
9572 saved_regs_mask &= ~ (1 << LR_REGNUM);
9573 saved_regs_mask |= (1 << PC_REGNUM);
9576 /* Load the registers off the stack. If we only have one register
9577 to load use the LDR instruction - it is faster. */
9578 if (saved_regs_mask == (1 << LR_REGNUM))
9580 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9582 else if (saved_regs_mask)
9584 if (saved_regs_mask & (1 << SP_REGNUM))
9585 /* Note - write back to the stack register is not enabled
9586 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9587 in the list of registers and if we add writeback the
9588 instruction becomes UNPREDICTABLE. */
9589 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9590 else
9591 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9594 if (current_function_pretend_args_size)
9596 /* Unwind the pre-pushed regs. */
9597 operands[0] = operands[1] = stack_pointer_rtx;
9598 operands[2] = GEN_INT (current_function_pretend_args_size);
9599 output_add_immediate (operands);
9603 /* We may have already restored PC directly from the stack. */
9604 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9605 return "";
9607 /* Stack adjustment for exception handler. */
9608 if (current_function_calls_eh_return)
9609 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9610 ARM_EH_STACKADJ_REGNUM);
9612 /* Generate the return instruction. */
9613 switch ((int) ARM_FUNC_TYPE (func_type))
9615 case ARM_FT_ISR:
9616 case ARM_FT_FIQ:
9617 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9618 break;
9620 case ARM_FT_EXCEPTION:
9621 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9622 break;
9624 case ARM_FT_INTERWORKED:
9625 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9626 break;
9628 default:
9629 if (arm_arch5 || arm_arch4t)
9630 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9631 else
9632 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9633 break;
9636 return "";
9639 static void
9640 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9641 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9643 arm_stack_offsets *offsets;
9645 if (TARGET_THUMB)
9647 int regno;
9649 /* Emit any call-via-reg trampolines that are needed for v4t support
9650 of call_reg and call_value_reg type insns. */
9651 for (regno = 0; regno < LR_REGNUM; regno++)
9653 rtx label = cfun->machine->call_via[regno];
9655 if (label != NULL)
9657 function_section (current_function_decl);
9658 targetm.asm_out.internal_label (asm_out_file, "L",
9659 CODE_LABEL_NUMBER (label));
9660 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9664 /* ??? Probably not safe to set this here, since it assumes that a
9665 function will be emitted as assembly immediately after we generate
9666 RTL for it. This does not happen for inline functions. */
9667 return_used_this_function = 0;
9669 else
9671 /* We need to take into account any stack-frame rounding. */
9672 offsets = arm_get_frame_offsets ();
9674 gcc_assert (!use_return_insn (FALSE, NULL)
9675 || !return_used_this_function
9676 || offsets->saved_regs == offsets->outgoing_args
9677 || frame_pointer_needed);
9679 /* Reset the ARM-specific per-function variables. */
9680 after_arm_reorg = 0;
9684 /* Generate and emit an insn that we will recognize as a push_multi.
9685 Unfortunately, since this insn does not reflect very well the actual
9686 semantics of the operation, we need to annotate the insn for the benefit
9687 of DWARF2 frame unwind information. */
9688 static rtx
9689 emit_multi_reg_push (unsigned long mask)
9691 int num_regs = 0;
9692 int num_dwarf_regs;
9693 int i, j;
9694 rtx par;
9695 rtx dwarf;
9696 int dwarf_par_index;
9697 rtx tmp, reg;
9699 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9700 if (mask & (1 << i))
9701 num_regs++;
9703 gcc_assert (num_regs && num_regs <= 16);
9705 /* We don't record the PC in the dwarf frame information. */
9706 num_dwarf_regs = num_regs;
9707 if (mask & (1 << PC_REGNUM))
9708 num_dwarf_regs--;
9710 /* For the body of the insn we are going to generate an UNSPEC in
9711 parallel with several USEs. This allows the insn to be recognized
9712 by the push_multi pattern in the arm.md file. The insn looks
9713 something like this:
9715 (parallel [
9716 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9717 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9718 (use (reg:SI 11 fp))
9719 (use (reg:SI 12 ip))
9720 (use (reg:SI 14 lr))
9721 (use (reg:SI 15 pc))
9724 For the frame note however, we try to be more explicit and actually
9725 show each register being stored into the stack frame, plus a (single)
9726 decrement of the stack pointer. We do it this way in order to be
9727 friendly to the stack unwinding code, which only wants to see a single
9728 stack decrement per instruction. The RTL we generate for the note looks
9729 something like this:
9731 (sequence [
9732 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9733 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9734 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9735 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9736 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9739 This sequence is used both by the code to support stack unwinding for
9740 exceptions handlers and the code to generate dwarf2 frame debugging. */
9742 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9743 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9744 dwarf_par_index = 1;
9746 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9748 if (mask & (1 << i))
9750 reg = gen_rtx_REG (SImode, i);
9752 XVECEXP (par, 0, 0)
9753 = gen_rtx_SET (VOIDmode,
9754 gen_rtx_MEM (BLKmode,
9755 gen_rtx_PRE_DEC (BLKmode,
9756 stack_pointer_rtx)),
9757 gen_rtx_UNSPEC (BLKmode,
9758 gen_rtvec (1, reg),
9759 UNSPEC_PUSH_MULT));
9761 if (i != PC_REGNUM)
9763 tmp = gen_rtx_SET (VOIDmode,
9764 gen_rtx_MEM (SImode, stack_pointer_rtx),
9765 reg);
9766 RTX_FRAME_RELATED_P (tmp) = 1;
9767 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9768 dwarf_par_index++;
9771 break;
9775 for (j = 1, i++; j < num_regs; i++)
9777 if (mask & (1 << i))
9779 reg = gen_rtx_REG (SImode, i);
9781 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9783 if (i != PC_REGNUM)
9785 tmp = gen_rtx_SET (VOIDmode,
9786 gen_rtx_MEM (SImode,
9787 plus_constant (stack_pointer_rtx,
9788 4 * j)),
9789 reg);
9790 RTX_FRAME_RELATED_P (tmp) = 1;
9791 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9794 j++;
9798 par = emit_insn (par);
9800 tmp = gen_rtx_SET (SImode,
9801 stack_pointer_rtx,
9802 gen_rtx_PLUS (SImode,
9803 stack_pointer_rtx,
9804 GEN_INT (-4 * num_regs)));
9805 RTX_FRAME_RELATED_P (tmp) = 1;
9806 XVECEXP (dwarf, 0, 0) = tmp;
9808 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9809 REG_NOTES (par));
9810 return par;
9813 static rtx
9814 emit_sfm (int base_reg, int count)
9816 rtx par;
9817 rtx dwarf;
9818 rtx tmp, reg;
9819 int i;
9821 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9822 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9824 reg = gen_rtx_REG (XFmode, base_reg++);
9826 XVECEXP (par, 0, 0)
9827 = gen_rtx_SET (VOIDmode,
9828 gen_rtx_MEM (BLKmode,
9829 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9830 gen_rtx_UNSPEC (BLKmode,
9831 gen_rtvec (1, reg),
9832 UNSPEC_PUSH_MULT));
9833 tmp = gen_rtx_SET (VOIDmode,
9834 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9835 RTX_FRAME_RELATED_P (tmp) = 1;
9836 XVECEXP (dwarf, 0, 1) = tmp;
9838 for (i = 1; i < count; i++)
9840 reg = gen_rtx_REG (XFmode, base_reg++);
9841 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9843 tmp = gen_rtx_SET (VOIDmode,
9844 gen_rtx_MEM (XFmode,
9845 plus_constant (stack_pointer_rtx,
9846 i * 12)),
9847 reg);
9848 RTX_FRAME_RELATED_P (tmp) = 1;
9849 XVECEXP (dwarf, 0, i + 1) = tmp;
9852 tmp = gen_rtx_SET (VOIDmode,
9853 stack_pointer_rtx,
9854 gen_rtx_PLUS (SImode,
9855 stack_pointer_rtx,
9856 GEN_INT (-12 * count)));
9857 RTX_FRAME_RELATED_P (tmp) = 1;
9858 XVECEXP (dwarf, 0, 0) = tmp;
9860 par = emit_insn (par);
9861 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9862 REG_NOTES (par));
9863 return par;
9867 /* Return true if the current function needs to save/restore LR. */
9869 static bool
9870 thumb_force_lr_save (void)
9872 return !cfun->machine->lr_save_eliminated
9873 && (!leaf_function_p ()
9874 || thumb_far_jump_used_p ()
9875 || regs_ever_live [LR_REGNUM]);
9879 /* Compute the distance from register FROM to register TO.
9880 These can be the arg pointer (26), the soft frame pointer (25),
9881 the stack pointer (13) or the hard frame pointer (11).
9882 In thumb mode r7 is used as the soft frame pointer, if needed.
9883 Typical stack layout looks like this:
9885 old stack pointer -> | |
9886 ----
9887 | | \
9888 | | saved arguments for
9889 | | vararg functions
9890 | | /
9892 hard FP & arg pointer -> | | \
9893 | | stack
9894 | | frame
9895 | | /
9897 | | \
9898 | | call saved
9899 | | registers
9900 soft frame pointer -> | | /
9902 | | \
9903 | | local
9904 | | variables
9905 | | /
9907 | | \
9908 | | outgoing
9909 | | arguments
9910 current stack pointer -> | | /
9913 For a given function some or all of these stack components
9914 may not be needed, giving rise to the possibility of
9915 eliminating some of the registers.
9917 The values returned by this function must reflect the behavior
9918 of arm_expand_prologue() and arm_compute_save_reg_mask().
9920 The sign of the number returned reflects the direction of stack
9921 growth, so the values are positive for all eliminations except
9922 from the soft frame pointer to the hard frame pointer.
9924 SFP may point just inside the local variables block to ensure correct
9925 alignment. */
9928 /* Calculate stack offsets. These are used to calculate register elimination
9929 offsets and in prologue/epilogue code. */
9931 static arm_stack_offsets *
9932 arm_get_frame_offsets (void)
9934 struct arm_stack_offsets *offsets;
9935 unsigned long func_type;
9936 int leaf;
9937 int saved;
9938 HOST_WIDE_INT frame_size;
9940 offsets = &cfun->machine->stack_offsets;
9942 /* We need to know if we are a leaf function. Unfortunately, it
9943 is possible to be called after start_sequence has been called,
9944 which causes get_insns to return the insns for the sequence,
9945 not the function, which will cause leaf_function_p to return
9946 the incorrect result.  To work around this, the computed offsets are
cached in cfun->machine->stack_offsets; we only need
9948 to know about leaf functions once reload has completed, and the
9949 frame size cannot be changed after that time, so we can safely
9950 use the cached value. */
9952 if (reload_completed)
9953 return offsets;
9955 /* Initially this is the size of the local variables. It will be translated
9956 into an offset once we have determined the size of preceding data. */
9957 frame_size = ROUND_UP_WORD (get_frame_size ());
9959 leaf = leaf_function_p ();
9961 /* Space for variadic functions. */
9962 offsets->saved_args = current_function_pretend_args_size;
9964 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9966 if (TARGET_ARM)
9968 unsigned int regno;
9970 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9972 /* We know that SP will be doubleword aligned on entry, and we must
9973 preserve that condition at any subroutine call. We also require the
9974 soft frame pointer to be doubleword aligned. */
9976 if (TARGET_REALLY_IWMMXT)
9978 /* Check for the call-saved iWMMXt registers. */
9979 for (regno = FIRST_IWMMXT_REGNUM;
9980 regno <= LAST_IWMMXT_REGNUM;
9981 regno++)
9982 if (regs_ever_live [regno] && ! call_used_regs [regno])
9983 saved += 8;
9986 func_type = arm_current_func_type ();
9987 if (! IS_VOLATILE (func_type))
9989 /* Space for saved FPA registers. */
9990 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9991 if (regs_ever_live[regno] && ! call_used_regs[regno])
9992 saved += 12;
9994 /* Space for saved VFP registers. */
9995 if (TARGET_HARD_FLOAT && TARGET_VFP)
9996 saved += arm_get_vfp_saved_size ();
9999 else /* TARGET_THUMB */
10001 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10002 if (TARGET_BACKTRACE)
10003 saved += 16;
10006 /* Saved registers include the stack frame. */
10007 offsets->saved_regs = offsets->saved_args + saved;
10008 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10009 /* A leaf function does not need any stack alignment if it has nothing
10010 on the stack. */
10011 if (leaf && frame_size == 0)
10013 offsets->outgoing_args = offsets->soft_frame;
10014 return offsets;
10017 /* Ensure SFP has the correct alignment. */
10018 if (ARM_DOUBLEWORD_ALIGN
10019 && (offsets->soft_frame & 7))
10020 offsets->soft_frame += 4;
10022 offsets->outgoing_args = offsets->soft_frame + frame_size
10023 + current_function_outgoing_args_size;
10025 if (ARM_DOUBLEWORD_ALIGN)
10027 /* Ensure SP remains doubleword aligned. */
10028 if (offsets->outgoing_args & 7)
10029 offsets->outgoing_args += 4;
10030 gcc_assert (!(offsets->outgoing_args & 7));
10033 return offsets;
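/* A worked example (all numbers hypothetical): an ARM-mode function with no
   pretend args, no frame pointer, no caller-interworking slot, {r4, r5, lr}
   to save, 8 bytes of locals and no outgoing arguments gets
     saved_args    = 0
     frame         = 0
     saved_regs    = 0 + 3 * 4 = 12
     soft_frame    = 12, padded to 16 when ARM_DOUBLEWORD_ALIGN
     outgoing_args = 16 + 8    = 24 (already 8-byte aligned)
   These are the values later consumed by
   arm_compute_initial_elimination_offset and the prologue/epilogue code.  */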
10037 /* Calculate the relative offsets for the different stack pointers. Positive
10038 offsets are in the direction of stack growth. */
10040 HOST_WIDE_INT
10041 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10043 arm_stack_offsets *offsets;
10045 offsets = arm_get_frame_offsets ();
10047 /* OK, now we have enough information to compute the distances.
10048 There must be an entry in these switch tables for each pair
10049 of registers in ELIMINABLE_REGS, even if some of the entries
10050 seem to be redundant or useless. */
10051 switch (from)
10053 case ARG_POINTER_REGNUM:
10054 switch (to)
10056 case THUMB_HARD_FRAME_POINTER_REGNUM:
10057 return 0;
10059 case FRAME_POINTER_REGNUM:
10060 /* This is the reverse of the soft frame pointer
10061 to hard frame pointer elimination below. */
10062 return offsets->soft_frame - offsets->saved_args;
10064 case ARM_HARD_FRAME_POINTER_REGNUM:
10065 /* If there is no stack frame then the hard
10066 frame pointer and the arg pointer coincide. */
10067 if (offsets->frame == offsets->saved_regs)
10068 return 0;
10069 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10070 return (frame_pointer_needed
10071 && cfun->static_chain_decl != NULL
10072 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10074 case STACK_POINTER_REGNUM:
10075 /* If nothing has been pushed on the stack at all
10076 then this will return -4. This *is* correct! */
10077 return offsets->outgoing_args - (offsets->saved_args + 4);
10079 default:
10080 gcc_unreachable ();
10082 gcc_unreachable ();
10084 case FRAME_POINTER_REGNUM:
10085 switch (to)
10087 case THUMB_HARD_FRAME_POINTER_REGNUM:
10088 return 0;
10090 case ARM_HARD_FRAME_POINTER_REGNUM:
10091 /* The hard frame pointer points to the top entry in the
10092 	 stack frame.  The soft frame pointer points to the bottom entry
10093 in the stack frame. If there is no stack frame at all,
10094 then they are identical. */
10096 return offsets->frame - offsets->soft_frame;
10098 case STACK_POINTER_REGNUM:
10099 return offsets->outgoing_args - offsets->soft_frame;
10101 default:
10102 gcc_unreachable ();
10104 gcc_unreachable ();
10106 default:
10107 /* You cannot eliminate from the stack pointer.
10108 In theory you could eliminate from the hard frame
10109 pointer to the stack pointer, but this will never
10110 happen, since if a stack frame is not needed the
10111 hard frame pointer will never be used. */
10112 gcc_unreachable ();
10117 /* Generate the prologue instructions for entry into an ARM function. */
10118 void
10119 arm_expand_prologue (void)
10121 int reg;
10122 rtx amount;
10123 rtx insn;
10124 rtx ip_rtx;
10125 unsigned long live_regs_mask;
10126 unsigned long func_type;
10127 int fp_offset = 0;
10128 int saved_pretend_args = 0;
10129 int saved_regs = 0;
10130 unsigned HOST_WIDE_INT args_to_push;
10131 arm_stack_offsets *offsets;
10133 func_type = arm_current_func_type ();
10135 /* Naked functions don't have prologues. */
10136 if (IS_NAKED (func_type))
10137 return;
10139 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10140 args_to_push = current_function_pretend_args_size;
10142   /* Compute which registers we will have to save onto the stack.  */
10143 live_regs_mask = arm_compute_save_reg_mask ();
10145 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10147 if (frame_pointer_needed)
10149 if (IS_INTERRUPT (func_type))
10151 /* Interrupt functions must not corrupt any registers.
10152 	     Creating a frame pointer, however, corrupts the IP
10153 register, so we must push it first. */
10154 insn = emit_multi_reg_push (1 << IP_REGNUM);
10156 /* Do not set RTX_FRAME_RELATED_P on this insn.
10157 The dwarf stack unwinding code only wants to see one
10158 stack decrement per function, and this is not it. If
10159 this instruction is labeled as being part of the frame
10160 creation sequence then dwarf2out_frame_debug_expr will
10161 die when it encounters the assignment of IP to FP
10162 later on, since the use of SP here establishes SP as
10163 the CFA register and not IP.
10165 Anyway this instruction is not really part of the stack
10166 frame creation although it is part of the prologue. */
10168 else if (IS_NESTED (func_type))
10170 	  /* The static chain register is the same as the IP register, which is
10171 	     used as a scratch register during stack frame creation.
10172 	     To get around this we need to find somewhere to store IP
10173 whilst the frame is being created. We try the following
10174 places in order:
10176 1. The last argument register.
10177 2. A slot on the stack above the frame. (This only
10178 works if the function is not a varargs function).
10179 3. Register r3, after pushing the argument registers
10180 onto the stack.
10182 Note - we only need to tell the dwarf2 backend about the SP
10183 adjustment in the second variant; the static chain register
10184 doesn't need to be unwound, as it doesn't contain a value
10185 inherited from the caller. */
10187 if (regs_ever_live[3] == 0)
10189 insn = gen_rtx_REG (SImode, 3);
10190 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10191 insn = emit_insn (insn);
10193 else if (args_to_push == 0)
10195 rtx dwarf;
10196 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10197 insn = gen_rtx_MEM (SImode, insn);
10198 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10199 insn = emit_insn (insn);
10201 fp_offset = 4;
10203 /* Just tell the dwarf backend that we adjusted SP. */
10204 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10205 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10206 GEN_INT (-fp_offset)));
10207 RTX_FRAME_RELATED_P (insn) = 1;
10208 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10209 dwarf, REG_NOTES (insn));
10211 else
10213 /* Store the args on the stack. */
10214 if (cfun->machine->uses_anonymous_args)
10215 insn = emit_multi_reg_push
10216 ((0xf0 >> (args_to_push / 4)) & 0xf);
10217 else
10218 insn = emit_insn
10219 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10220 GEN_INT (- args_to_push)));
10222 RTX_FRAME_RELATED_P (insn) = 1;
10224 saved_pretend_args = 1;
10225 fp_offset = args_to_push;
10226 args_to_push = 0;
10228 /* Now reuse r3 to preserve IP. */
10229 insn = gen_rtx_REG (SImode, 3);
10230 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10231 (void) emit_insn (insn);
10235 if (fp_offset)
10237 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10238 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10240 else
10241 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10243 insn = emit_insn (insn);
10244 RTX_FRAME_RELATED_P (insn) = 1;
10247 if (args_to_push)
10249 /* Push the argument registers, or reserve space for them. */
10250 if (cfun->machine->uses_anonymous_args)
10251 insn = emit_multi_reg_push
10252 ((0xf0 >> (args_to_push / 4)) & 0xf);
10253 else
10254 insn = emit_insn
10255 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10256 GEN_INT (- args_to_push)));
10257 RTX_FRAME_RELATED_P (insn) = 1;
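      /* Illustrative note (not in the original sources): the mask expression
	 (0xf0 >> (args_to_push / 4)) & 0xf selects the argument registers
	 holding the anonymous arguments.  For example, with 8 bytes of
	 pretend args (two registers) it evaluates to (0xf0 >> 2) & 0xf = 0xc,
	 i.e. the mask for r2 and r3; with 16 bytes it yields 0xf, i.e.
	 r0-r3.  */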
10260 /* If this is an interrupt service routine, and the link register
10261 is going to be pushed, and we are not creating a stack frame,
10262 (which would involve an extra push of IP and a pop in the epilogue)
10263 subtracting four from LR now will mean that the function return
10264 can be done with a single instruction. */
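  /* Illustrative note (not in the original sources): an IRQ/FIQ handler must
     return to LR - 4.  By biasing LR before it is pushed, the epilogue can
     return with a single "ldmfd sp!, {..., pc}^" instead of restoring LR and
     then executing a separate "subs pc, lr, #4".  */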
10265 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10266 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10267 && ! frame_pointer_needed)
10268 emit_insn (gen_rtx_SET (SImode,
10269 gen_rtx_REG (SImode, LR_REGNUM),
10270 gen_rtx_PLUS (SImode,
10271 gen_rtx_REG (SImode, LR_REGNUM),
10272 GEN_INT (-4))));
10274 if (live_regs_mask)
10276 insn = emit_multi_reg_push (live_regs_mask);
10277 saved_regs += bit_count (live_regs_mask) * 4;
10278 RTX_FRAME_RELATED_P (insn) = 1;
10281 if (TARGET_IWMMXT)
10282 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10283 if (regs_ever_live[reg] && ! call_used_regs [reg])
10285 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10286 insn = gen_rtx_MEM (V2SImode, insn);
10287 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10288 gen_rtx_REG (V2SImode, reg)));
10289 RTX_FRAME_RELATED_P (insn) = 1;
10290 saved_regs += 8;
10293 if (! IS_VOLATILE (func_type))
10295 int start_reg;
10297 /* Save any floating point call-saved registers used by this
10298 function. */
10299 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10301 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10302 if (regs_ever_live[reg] && !call_used_regs[reg])
10304 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10305 insn = gen_rtx_MEM (XFmode, insn);
10306 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10307 gen_rtx_REG (XFmode, reg)));
10308 RTX_FRAME_RELATED_P (insn) = 1;
10309 saved_regs += 12;
10312 else
10314 start_reg = LAST_FPA_REGNUM;
10316 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10318 if (regs_ever_live[reg] && !call_used_regs[reg])
10320 if (start_reg - reg == 3)
10322 insn = emit_sfm (reg, 4);
10323 RTX_FRAME_RELATED_P (insn) = 1;
10324 saved_regs += 48;
10325 start_reg = reg - 1;
10328 else
10330 if (start_reg != reg)
10332 insn = emit_sfm (reg + 1, start_reg - reg);
10333 RTX_FRAME_RELATED_P (insn) = 1;
10334 saved_regs += (start_reg - reg) * 12;
10336 start_reg = reg - 1;
10340 if (start_reg != reg)
10342 insn = emit_sfm (reg + 1, start_reg - reg);
10343 saved_regs += (start_reg - reg) * 12;
10344 RTX_FRAME_RELATED_P (insn) = 1;
10347 if (TARGET_HARD_FLOAT && TARGET_VFP)
10349 start_reg = FIRST_VFP_REGNUM;
10351 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10353 if ((!regs_ever_live[reg] || call_used_regs[reg])
10354 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10356 if (start_reg != reg)
10357 saved_regs += vfp_emit_fstmx (start_reg,
10358 (reg - start_reg) / 2);
10359 start_reg = reg + 2;
10362 if (start_reg != reg)
10363 saved_regs += vfp_emit_fstmx (start_reg,
10364 (reg - start_reg) / 2);
10368 if (frame_pointer_needed)
10370 /* Create the new frame pointer. */
10371 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10372 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10373 RTX_FRAME_RELATED_P (insn) = 1;
10375 if (IS_NESTED (func_type))
10377 /* Recover the static chain register. */
10378 if (regs_ever_live [3] == 0
10379 || saved_pretend_args)
10380 insn = gen_rtx_REG (SImode, 3);
10381 else /* if (current_function_pretend_args_size == 0) */
10383 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10384 GEN_INT (4));
10385 insn = gen_rtx_MEM (SImode, insn);
10388 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10389 /* Add a USE to stop propagate_one_insn() from barfing. */
10390 emit_insn (gen_prologue_use (ip_rtx));
10394 offsets = arm_get_frame_offsets ();
10395 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10397 /* This add can produce multiple insns for a large constant, so we
10398 need to get tricky. */
10399 rtx last = get_last_insn ();
10401 amount = GEN_INT (offsets->saved_args + saved_regs
10402 - offsets->outgoing_args);
10404 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10405 amount));
10408 last = last ? NEXT_INSN (last) : get_insns ();
10409 RTX_FRAME_RELATED_P (last) = 1;
10411 while (last != insn);
10413 /* If the frame pointer is needed, emit a special barrier that
10414 will prevent the scheduler from moving stores to the frame
10415 before the stack adjustment. */
10416 if (frame_pointer_needed)
10417 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10418 hard_frame_pointer_rtx));
10422 if (flag_pic)
10423 arm_load_pic_register (INVALID_REGNUM);
10425 /* If we are profiling, make sure no instructions are scheduled before
10426 the call to mcount. Similarly if the user has requested no
10427 scheduling in the prolog. */
10428 if (current_function_profile || !TARGET_SCHED_PROLOG)
10429 emit_insn (gen_blockage ());
10431 /* If the link register is being kept alive, with the return address in it,
10432 then make sure that it does not get reused by the ce2 pass. */
10433 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10435 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10436 cfun->machine->lr_save_eliminated = 1;
10440 /* If CODE is 'd', then X is a condition operand and the instruction
10441    should only be executed if the condition is true.
10442    If CODE is 'D', then X is a condition operand and the instruction
10443 should only be executed if the condition is false: however, if the mode
10444 of the comparison is CCFPEmode, then always execute the instruction -- we
10445 do this because in these circumstances !GE does not necessarily imply LT;
10446 in these cases the instruction pattern will take care to make sure that
10447 an instruction containing %d will follow, thereby undoing the effects of
10448 doing this instruction unconditionally.
10449 If CODE is 'N' then X is a floating point operand that must be negated
10450 before output.
10451 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10452 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
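/* Worked examples (added for illustration; not part of the original
   comment): %B applied to (const_int 5) prints the bitwise inverse -6,
   while %M applied to a DImode value held in r4 prints "{r4-r5}".  */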
10453 void
10454 arm_print_operand (FILE *stream, rtx x, int code)
10456 switch (code)
10458 case '@':
10459 fputs (ASM_COMMENT_START, stream);
10460 return;
10462 case '_':
10463 fputs (user_label_prefix, stream);
10464 return;
10466 case '|':
10467 fputs (REGISTER_PREFIX, stream);
10468 return;
10470 case '?':
10471 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10473 if (TARGET_THUMB)
10475 output_operand_lossage ("predicated Thumb instruction");
10476 break;
10478 if (current_insn_predicate != NULL)
10480 output_operand_lossage
10481 ("predicated instruction in conditional sequence");
10482 break;
10485 fputs (arm_condition_codes[arm_current_cc], stream);
10487 else if (current_insn_predicate)
10489 enum arm_cond_code code;
10491 if (TARGET_THUMB)
10493 output_operand_lossage ("predicated Thumb instruction");
10494 break;
10497 code = get_arm_condition_code (current_insn_predicate);
10498 fputs (arm_condition_codes[code], stream);
10500 return;
10502 case 'N':
10504 REAL_VALUE_TYPE r;
10505 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10506 r = REAL_VALUE_NEGATE (r);
10507 fprintf (stream, "%s", fp_const_from_val (&r));
10509 return;
10511 case 'B':
10512 if (GET_CODE (x) == CONST_INT)
10514 HOST_WIDE_INT val;
10515 val = ARM_SIGN_EXTEND (~INTVAL (x));
10516 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10518 else
10520 putc ('~', stream);
10521 output_addr_const (stream, x);
10523 return;
10525 case 'i':
10526 fprintf (stream, "%s", arithmetic_instr (x, 1));
10527 return;
10529 /* Truncate Cirrus shift counts. */
10530 case 's':
10531 if (GET_CODE (x) == CONST_INT)
10533 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10534 return;
10536 arm_print_operand (stream, x, 0);
10537 return;
10539 case 'I':
10540 fprintf (stream, "%s", arithmetic_instr (x, 0));
10541 return;
10543 case 'S':
10545 HOST_WIDE_INT val;
10546 const char * shift = shift_op (x, &val);
10548 if (shift)
10550 fprintf (stream, ", %s ", shift_op (x, &val));
10551 if (val == -1)
10552 arm_print_operand (stream, XEXP (x, 1), 0);
10553 else
10554 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10557 return;
10559 /* An explanation of the 'Q', 'R' and 'H' register operands:
10561 In a pair of registers containing a DI or DF value the 'Q'
10562 operand returns the register number of the register containing
10563 the least significant part of the value. The 'R' operand returns
10564 the register number of the register containing the most
10565 significant part of the value.
10567 The 'H' operand returns the higher of the two register numbers.
10568 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10569 same as the 'Q' operand, since the most significant part of the
10570 value is held in the lower number register. The reverse is true
10571 on systems where WORDS_BIG_ENDIAN is false.
10573 The purpose of these operands is to distinguish between cases
10574 where the endian-ness of the values is important (for example
10575 when they are added together), and cases where the endian-ness
10576 is irrelevant, but the order of register operations is important.
10577 For example when loading a value from memory into a register
10578 pair, the endian-ness does not matter. Provided that the value
10579 from the lower memory address is put into the lower numbered
10580 register, and the value from the higher address is put into the
10581 higher numbered register, the load will work regardless of whether
10582 the value being loaded is big-wordian or little-wordian. The
10583 order of the two register loads can matter however, if the address
10584 of the memory location is actually held in one of the registers
10585 being overwritten by the load. */
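    /* Illustrative example (not part of the original comment): with
       WORDS_BIG_ENDIAN false, a DImode value held in r2/r3 gives
       %Q -> r2 (least significant word), %R -> r3 (most significant word)
       and %H -> r3.  When WORDS_BIG_ENDIAN is true, %Q and %R swap over,
       while %H still names the higher-numbered register of the pair.  */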
10586 case 'Q':
10587 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10589 output_operand_lossage ("invalid operand for code '%c'", code);
10590 return;
10593 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10594 return;
10596 case 'R':
10597 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10599 output_operand_lossage ("invalid operand for code '%c'", code);
10600 return;
10603 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10604 return;
10606 case 'H':
10607 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10609 output_operand_lossage ("invalid operand for code '%c'", code);
10610 return;
10613 asm_fprintf (stream, "%r", REGNO (x) + 1);
10614 return;
10616 case 'm':
10617 asm_fprintf (stream, "%r",
10618 GET_CODE (XEXP (x, 0)) == REG
10619 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10620 return;
10622 case 'M':
10623 asm_fprintf (stream, "{%r-%r}",
10624 REGNO (x),
10625 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10626 return;
10628 case 'd':
10629 /* CONST_TRUE_RTX means always -- that's the default. */
10630 if (x == const_true_rtx)
10631 return;
10633 if (!COMPARISON_P (x))
10635 output_operand_lossage ("invalid operand for code '%c'", code);
10636 return;
10639 fputs (arm_condition_codes[get_arm_condition_code (x)],
10640 stream);
10641 return;
10643 case 'D':
10644 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10645 want to do that. */
10646 if (x == const_true_rtx)
10648 	  output_operand_lossage ("instruction never executed");
10649 return;
10651 if (!COMPARISON_P (x))
10653 output_operand_lossage ("invalid operand for code '%c'", code);
10654 return;
10657 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10658 (get_arm_condition_code (x))],
10659 stream);
10660 return;
10662 /* Cirrus registers can be accessed in a variety of ways:
10663 single floating point (f)
10664 double floating point (d)
10665 32bit integer (fx)
10666 64bit integer (dx). */
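    /* Illustrative example (an assumption based on the usual mv0..mv15
       register naming): for Cirrus register mv3, %W prints "mvf3", %X
       prints "mvd3", %Y prints "mvfx3" and %Z prints "mvdx3".  */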
10667 case 'W': /* Cirrus register in F mode. */
10668 case 'X': /* Cirrus register in D mode. */
10669 case 'Y': /* Cirrus register in FX mode. */
10670 case 'Z': /* Cirrus register in DX mode. */
10671 gcc_assert (GET_CODE (x) == REG
10672 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
10674 fprintf (stream, "mv%s%s",
10675 code == 'W' ? "f"
10676 : code == 'X' ? "d"
10677 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10679 return;
10681 /* Print cirrus register in the mode specified by the register's mode. */
10682 case 'V':
10684 int mode = GET_MODE (x);
10686 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10688 output_operand_lossage ("invalid operand for code '%c'", code);
10689 return;
10692 fprintf (stream, "mv%s%s",
10693 mode == DFmode ? "d"
10694 : mode == SImode ? "fx"
10695 : mode == DImode ? "dx"
10696 : "f", reg_names[REGNO (x)] + 2);
10698 return;
10701 case 'U':
10702 if (GET_CODE (x) != REG
10703 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10704 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10705 /* Bad value for wCG register number. */
10707 output_operand_lossage ("invalid operand for code '%c'", code);
10708 return;
10711 else
10712 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10713 return;
10715 /* Print an iWMMXt control register name. */
10716 case 'w':
10717 if (GET_CODE (x) != CONST_INT
10718 || INTVAL (x) < 0
10719 || INTVAL (x) >= 16)
10720 /* Bad value for wC register number. */
10722 output_operand_lossage ("invalid operand for code '%c'", code);
10723 return;
10726 else
10728 static const char * wc_reg_names [16] =
10730 "wCID", "wCon", "wCSSF", "wCASF",
10731 "wC4", "wC5", "wC6", "wC7",
10732 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10733 "wC12", "wC13", "wC14", "wC15"
10736 fprintf (stream, wc_reg_names [INTVAL (x)]);
10738 return;
10740 /* Print a VFP double precision register name. */
10741 case 'P':
10743 int mode = GET_MODE (x);
10744 int num;
10746 if (mode != DImode && mode != DFmode)
10748 output_operand_lossage ("invalid operand for code '%c'", code);
10749 return;
10752 if (GET_CODE (x) != REG
10753 || !IS_VFP_REGNUM (REGNO (x)))
10755 output_operand_lossage ("invalid operand for code '%c'", code);
10756 return;
10759 num = REGNO(x) - FIRST_VFP_REGNUM;
10760 if (num & 1)
10762 output_operand_lossage ("invalid operand for code '%c'", code);
10763 return;
10766 fprintf (stream, "d%d", num >> 1);
10768 return;
10770 default:
10771 if (x == 0)
10773 output_operand_lossage ("missing operand");
10774 return;
10777 switch (GET_CODE (x))
10779 case REG:
10780 asm_fprintf (stream, "%r", REGNO (x));
10781 break;
10783 case MEM:
10784 output_memory_reference_mode = GET_MODE (x);
10785 output_address (XEXP (x, 0));
10786 break;
10788 case CONST_DOUBLE:
10789 fprintf (stream, "#%s", fp_immediate_constant (x));
10790 break;
10792 default:
10793 gcc_assert (GET_CODE (x) != NEG);
10794 fputc ('#', stream);
10795 output_addr_const (stream, x);
10796 break;
10801 #ifndef AOF_ASSEMBLER
10802 /* Target hook for assembling integer objects. The ARM version needs to
10803 handle word-sized values specially. */
10804 static bool
10805 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10807 if (size == UNITS_PER_WORD && aligned_p)
10809 fputs ("\t.word\t", asm_out_file);
10810 output_addr_const (asm_out_file, x);
10812 /* Mark symbols as position independent. We only do this in the
10813 .text segment, not in the .data segment. */
10814 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10815 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10817 if (GET_CODE (x) == SYMBOL_REF
10818 && (CONSTANT_POOL_ADDRESS_P (x)
10819 || SYMBOL_REF_LOCAL_P (x)))
10820 fputs ("(GOTOFF)", asm_out_file);
10821 else if (GET_CODE (x) == LABEL_REF)
10822 fputs ("(GOTOFF)", asm_out_file);
10823 else
10824 fputs ("(GOT)", asm_out_file);
10826 fputc ('\n', asm_out_file);
10827 return true;
10830 if (arm_vector_mode_supported_p (GET_MODE (x)))
10832 int i, units;
10834 gcc_assert (GET_CODE (x) == CONST_VECTOR);
10836 units = CONST_VECTOR_NUNITS (x);
10838 switch (GET_MODE (x))
10840 case V2SImode: size = 4; break;
10841 case V4HImode: size = 2; break;
10842 case V8QImode: size = 1; break;
10843 default:
10844 gcc_unreachable ();
10847 for (i = 0; i < units; i++)
10849 rtx elt;
10851 elt = CONST_VECTOR_ELT (x, i);
10852 assemble_integer
10853 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10856 return true;
10859 return default_assemble_integer (x, size, aligned_p);
10863 /* Add a function to the list of static constructors. */
10865 static void
10866 arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10868 if (!TARGET_AAPCS_BASED)
10870 default_named_section_asm_out_constructor (symbol, priority);
10871 return;
10874 /* Put these in the .init_array section, using a special relocation. */
10875 ctors_section ();
10876 assemble_align (POINTER_SIZE);
10877 fputs ("\t.word\t", asm_out_file);
10878 output_addr_const (asm_out_file, symbol);
10879 fputs ("(target1)\n", asm_out_file);
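/* Illustrative note (not in the original sources): for a hypothetical
   constructor symbol `foo' the AAPCS path above emits

	.word	foo(target1)

   into the constructor section, leaving the linker to resolve the entry
   with the target1 relocation.  */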
10881 #endif
10883 /* A finite state machine takes care of noticing whether or not instructions
10884 can be conditionally executed, and thus decrease execution time and code
10885 size by deleting branch instructions. The fsm is controlled by
10886 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10888 /* The states of the fsm controlling condition codes are:
10889 0: normal, do nothing special
10890 1: make ASM_OUTPUT_OPCODE not output this instruction
10891 2: make ASM_OUTPUT_OPCODE not output this instruction
10892 3: make instructions conditional
10893 4: make instructions conditional
10895 State transitions (state->state by whom under condition):
10896 0 -> 1 final_prescan_insn if the `target' is a label
10897 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10898 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10899 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10900 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10901 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10902 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10903 (the target insn is arm_target_insn).
10905 If the jump clobbers the conditions then we use states 2 and 4.
10907 A similar thing can be done with conditional return insns.
10909 XXX In case the `target' is an unconditional branch, this conditionalising
10910 of the instructions always reduces code size, but not always execution
10911 time. But then, I want to reduce the code size to somewhere near what
10912 /bin/cc produces. */
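/* Illustrative example (not part of the original comment): a sequence such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   can be rewritten by this machinery as

	cmp	r0, #0
	addne	r1, r1, #1

   removing the branch entirely.  */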
10914 /* Returns the index of the ARM condition code string in
10915 `arm_condition_codes'. COMPARISON should be an rtx like
10916 `(eq (...) (...))'. */
10917 static enum arm_cond_code
10918 get_arm_condition_code (rtx comparison)
10920 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10921 int code;
10922 enum rtx_code comp_code = GET_CODE (comparison);
10924 if (GET_MODE_CLASS (mode) != MODE_CC)
10925 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10926 XEXP (comparison, 1));
10928 switch (mode)
10930 case CC_DNEmode: code = ARM_NE; goto dominance;
10931 case CC_DEQmode: code = ARM_EQ; goto dominance;
10932 case CC_DGEmode: code = ARM_GE; goto dominance;
10933 case CC_DGTmode: code = ARM_GT; goto dominance;
10934 case CC_DLEmode: code = ARM_LE; goto dominance;
10935 case CC_DLTmode: code = ARM_LT; goto dominance;
10936 case CC_DGEUmode: code = ARM_CS; goto dominance;
10937 case CC_DGTUmode: code = ARM_HI; goto dominance;
10938 case CC_DLEUmode: code = ARM_LS; goto dominance;
10939 case CC_DLTUmode: code = ARM_CC;
10941 dominance:
10942 gcc_assert (comp_code == EQ || comp_code == NE);
10944 if (comp_code == EQ)
10945 return ARM_INVERSE_CONDITION_CODE (code);
10946 return code;
10948 case CC_NOOVmode:
10949 switch (comp_code)
10951 case NE: return ARM_NE;
10952 case EQ: return ARM_EQ;
10953 case GE: return ARM_PL;
10954 case LT: return ARM_MI;
10955 default: gcc_unreachable ();
10958 case CC_Zmode:
10959 switch (comp_code)
10961 case NE: return ARM_NE;
10962 case EQ: return ARM_EQ;
10963 default: gcc_unreachable ();
10966 case CC_Nmode:
10967 switch (comp_code)
10969 case NE: return ARM_MI;
10970 case EQ: return ARM_PL;
10971 default: gcc_unreachable ();
10974 case CCFPEmode:
10975 case CCFPmode:
10976 /* These encodings assume that AC=1 in the FPA system control
10977 byte. This allows us to handle all cases except UNEQ and
10978 LTGT. */
10979 switch (comp_code)
10981 case GE: return ARM_GE;
10982 case GT: return ARM_GT;
10983 case LE: return ARM_LS;
10984 case LT: return ARM_MI;
10985 case NE: return ARM_NE;
10986 case EQ: return ARM_EQ;
10987 case ORDERED: return ARM_VC;
10988 case UNORDERED: return ARM_VS;
10989 case UNLT: return ARM_LT;
10990 case UNLE: return ARM_LE;
10991 case UNGT: return ARM_HI;
10992 case UNGE: return ARM_PL;
10993 /* UNEQ and LTGT do not have a representation. */
10994 case UNEQ: /* Fall through. */
10995 case LTGT: /* Fall through. */
10996 default: gcc_unreachable ();
10999 case CC_SWPmode:
11000 switch (comp_code)
11002 case NE: return ARM_NE;
11003 case EQ: return ARM_EQ;
11004 case GE: return ARM_LE;
11005 case GT: return ARM_LT;
11006 case LE: return ARM_GE;
11007 case LT: return ARM_GT;
11008 case GEU: return ARM_LS;
11009 case GTU: return ARM_CC;
11010 case LEU: return ARM_CS;
11011 case LTU: return ARM_HI;
11012 default: gcc_unreachable ();
11015 case CC_Cmode:
11016 switch (comp_code)
11018 case LTU: return ARM_CS;
11019 case GEU: return ARM_CC;
11020 default: gcc_unreachable ();
11023 case CCmode:
11024 switch (comp_code)
11026 case NE: return ARM_NE;
11027 case EQ: return ARM_EQ;
11028 case GE: return ARM_GE;
11029 case GT: return ARM_GT;
11030 case LE: return ARM_LE;
11031 case LT: return ARM_LT;
11032 case GEU: return ARM_CS;
11033 case GTU: return ARM_HI;
11034 case LEU: return ARM_LS;
11035 case LTU: return ARM_CC;
11036 default: gcc_unreachable ();
11039 default: gcc_unreachable ();
11043 void
11044 arm_final_prescan_insn (rtx insn)
11046 /* BODY will hold the body of INSN. */
11047 rtx body = PATTERN (insn);
11049 /* This will be 1 if trying to repeat the trick, and things need to be
11050 reversed if it appears to fail. */
11051 int reverse = 0;
11053   /* JUMP_CLOBBERS set to one implies that the condition codes are clobbered
11054      if the branch is taken, even if the rtl suggests otherwise.  It also
11055 means that we have to grub around within the jump expression to find
11056 out what the conditions are when the jump isn't taken. */
11057 int jump_clobbers = 0;
11059 /* If we start with a return insn, we only succeed if we find another one. */
11060 int seeking_return = 0;
11062 /* START_INSN will hold the insn from where we start looking. This is the
11063 first insn after the following code_label if REVERSE is true. */
11064 rtx start_insn = insn;
11066 /* If in state 4, check if the target branch is reached, in order to
11067 change back to state 0. */
11068 if (arm_ccfsm_state == 4)
11070 if (insn == arm_target_insn)
11072 arm_target_insn = NULL;
11073 arm_ccfsm_state = 0;
11075 return;
11078 /* If in state 3, it is possible to repeat the trick, if this insn is an
11079 unconditional branch to a label, and immediately following this branch
11080 is the previous target label which is only used once, and the label this
11081 branch jumps to is not too far off. */
11082 if (arm_ccfsm_state == 3)
11084 if (simplejump_p (insn))
11086 start_insn = next_nonnote_insn (start_insn);
11087 if (GET_CODE (start_insn) == BARRIER)
11089 /* XXX Isn't this always a barrier? */
11090 start_insn = next_nonnote_insn (start_insn);
11092 if (GET_CODE (start_insn) == CODE_LABEL
11093 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11094 && LABEL_NUSES (start_insn) == 1)
11095 reverse = TRUE;
11096 else
11097 return;
11099 else if (GET_CODE (body) == RETURN)
11101 start_insn = next_nonnote_insn (start_insn);
11102 if (GET_CODE (start_insn) == BARRIER)
11103 start_insn = next_nonnote_insn (start_insn);
11104 if (GET_CODE (start_insn) == CODE_LABEL
11105 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11106 && LABEL_NUSES (start_insn) == 1)
11108 reverse = TRUE;
11109 seeking_return = 1;
11111 else
11112 return;
11114 else
11115 return;
11118 gcc_assert (!arm_ccfsm_state || reverse);
11119 if (GET_CODE (insn) != JUMP_INSN)
11120 return;
11122   /* This jump might be paralleled with a clobber of the condition codes;
11123      the jump should always come first.  */
11124 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11125 body = XVECEXP (body, 0, 0);
11127 if (reverse
11128 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11129 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11131 int insns_skipped;
11132 int fail = FALSE, succeed = FALSE;
11133 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11134 int then_not_else = TRUE;
11135 rtx this_insn = start_insn, label = 0;
11137 /* If the jump cannot be done with one instruction, we cannot
11138 conditionally execute the instruction in the inverse case. */
11139 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11141 jump_clobbers = 1;
11142 return;
11145 /* Register the insn jumped to. */
11146 if (reverse)
11148 if (!seeking_return)
11149 label = XEXP (SET_SRC (body), 0);
11151 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11152 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11153 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11155 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11156 then_not_else = FALSE;
11158 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11159 seeking_return = 1;
11160 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11162 seeking_return = 1;
11163 then_not_else = FALSE;
11165 else
11166 gcc_unreachable ();
11168 /* See how many insns this branch skips, and what kind of insns. If all
11169 insns are okay, and the label or unconditional branch to the same
11170 label is not too far away, succeed. */
11171 for (insns_skipped = 0;
11172 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11174 rtx scanbody;
11176 this_insn = next_nonnote_insn (this_insn);
11177 if (!this_insn)
11178 break;
11180 switch (GET_CODE (this_insn))
11182 case CODE_LABEL:
11183 /* Succeed if it is the target label, otherwise fail since
11184 control falls in from somewhere else. */
11185 if (this_insn == label)
11187 if (jump_clobbers)
11189 arm_ccfsm_state = 2;
11190 this_insn = next_nonnote_insn (this_insn);
11192 else
11193 arm_ccfsm_state = 1;
11194 succeed = TRUE;
11196 else
11197 fail = TRUE;
11198 break;
11200 case BARRIER:
11201 /* Succeed if the following insn is the target label.
11202 Otherwise fail.
11203 If return insns are used then the last insn in a function
11204 will be a barrier. */
11205 this_insn = next_nonnote_insn (this_insn);
11206 if (this_insn && this_insn == label)
11208 if (jump_clobbers)
11210 arm_ccfsm_state = 2;
11211 this_insn = next_nonnote_insn (this_insn);
11213 else
11214 arm_ccfsm_state = 1;
11215 succeed = TRUE;
11217 else
11218 fail = TRUE;
11219 break;
11221 case CALL_INSN:
11222 /* The AAPCS says that conditional calls should not be
11223 used since they make interworking inefficient (the
11224 linker can't transform BL<cond> into BLX). That's
11225 only a problem if the machine has BLX. */
11226 if (arm_arch5)
11228 fail = TRUE;
11229 break;
11232 /* Succeed if the following insn is the target label, or
11233 if the following two insns are a barrier and the
11234 target label. */
11235 this_insn = next_nonnote_insn (this_insn);
11236 if (this_insn && GET_CODE (this_insn) == BARRIER)
11237 this_insn = next_nonnote_insn (this_insn);
11239 if (this_insn && this_insn == label
11240 && insns_skipped < max_insns_skipped)
11242 if (jump_clobbers)
11244 arm_ccfsm_state = 2;
11245 this_insn = next_nonnote_insn (this_insn);
11247 else
11248 arm_ccfsm_state = 1;
11249 succeed = TRUE;
11251 else
11252 fail = TRUE;
11253 break;
11255 case JUMP_INSN:
11256 /* If this is an unconditional branch to the same label, succeed.
11257 If it is to another label, do nothing. If it is conditional,
11258 fail. */
11259 /* XXX Probably, the tests for SET and the PC are
11260 unnecessary. */
11262 scanbody = PATTERN (this_insn);
11263 if (GET_CODE (scanbody) == SET
11264 && GET_CODE (SET_DEST (scanbody)) == PC)
11266 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11267 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11269 arm_ccfsm_state = 2;
11270 succeed = TRUE;
11272 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11273 fail = TRUE;
11275 /* Fail if a conditional return is undesirable (e.g. on a
11276 StrongARM), but still allow this if optimizing for size. */
11277 else if (GET_CODE (scanbody) == RETURN
11278 && !use_return_insn (TRUE, NULL)
11279 && !optimize_size)
11280 fail = TRUE;
11281 else if (GET_CODE (scanbody) == RETURN
11282 && seeking_return)
11284 arm_ccfsm_state = 2;
11285 succeed = TRUE;
11287 else if (GET_CODE (scanbody) == PARALLEL)
11289 switch (get_attr_conds (this_insn))
11291 case CONDS_NOCOND:
11292 break;
11293 default:
11294 fail = TRUE;
11295 break;
11298 else
11299 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11301 break;
11303 case INSN:
11304 /* Instructions using or affecting the condition codes make it
11305 fail. */
11306 scanbody = PATTERN (this_insn);
11307 if (!(GET_CODE (scanbody) == SET
11308 || GET_CODE (scanbody) == PARALLEL)
11309 || get_attr_conds (this_insn) != CONDS_NOCOND)
11310 fail = TRUE;
11312 	  /* A conditional Cirrus instruction must be followed by
11313 	     a non-Cirrus instruction.  This function conditionalizes
11314 	     instructions, but by the time we get here
11315 	     shorten_branches() has already been called, so we
11316 	     cannot insert the required nops.  To be safe, we
11317 	     therefore disable conditionalizing Cirrus
11318 	     instructions.  */
11319 if (GET_CODE (scanbody) != USE
11320 && GET_CODE (scanbody) != CLOBBER
11321 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11322 fail = TRUE;
11323 break;
11325 default:
11326 break;
11329 if (succeed)
11331 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11332 arm_target_label = CODE_LABEL_NUMBER (label);
11333 else
11335 gcc_assert (seeking_return || arm_ccfsm_state == 2);
11337 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11339 this_insn = next_nonnote_insn (this_insn);
11340 gcc_assert (!this_insn
11341 || (GET_CODE (this_insn) != BARRIER
11342 && GET_CODE (this_insn) != CODE_LABEL));
11344 if (!this_insn)
11346 	      /* Oh, dear!  We ran off the end; give up.  */
11347 recog (PATTERN (insn), insn, NULL);
11348 arm_ccfsm_state = 0;
11349 arm_target_insn = NULL;
11350 return;
11352 arm_target_insn = this_insn;
11354 if (jump_clobbers)
11356 gcc_assert (!reverse);
11357 arm_current_cc =
11358 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11359 0), 0), 1));
11360 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11361 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11362 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11363 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11365 else
11367 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11368 what it was. */
11369 if (!reverse)
11370 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11371 0));
11374 if (reverse || then_not_else)
11375 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11378 /* Restore recog_data (getting the attributes of other insns can
11379 destroy this array, but final.c assumes that it remains intact
11380 across this call; since the insn has been recognized already we
11381 call recog direct). */
11382 recog (PATTERN (insn), insn, NULL);
11386 /* Returns true if REGNO is a valid register
11387 for holding a quantity of type MODE. */
11389 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11391 if (GET_MODE_CLASS (mode) == MODE_CC)
11392 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11394 if (TARGET_THUMB)
11395 /* For the Thumb we only allow values bigger than SImode in
11396 registers 0 - 6, so that there is always a second low
11397 register available to hold the upper part of the value.
11398        We probably ought to ensure that the register is the
11399 start of an even numbered register pair. */
11400 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11402 if (IS_CIRRUS_REGNUM (regno))
11403 /* We have outlawed SI values in Cirrus registers because they
11404 reside in the lower 32 bits, but SF values reside in the
11405 upper 32 bits. This causes gcc all sorts of grief. We can't
11406 even split the registers into pairs because Cirrus SI values
11407 get sign extended to 64bits-- aldyh. */
11408 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11410 if (IS_VFP_REGNUM (regno))
11412 if (mode == SFmode || mode == SImode)
11413 return TRUE;
11415 /* DFmode values are only valid in even register pairs. */
11416 if (mode == DFmode)
11417 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11418 return FALSE;
11421 if (IS_IWMMXT_GR_REGNUM (regno))
11422 return mode == SImode;
11424 if (IS_IWMMXT_REGNUM (regno))
11425 return VALID_IWMMXT_REG_MODE (mode);
11427 /* We allow any value to be stored in the general registers.
11428 Restrict doubleword quantities to even register pairs so that we can
11429 use ldrd. */
11430 if (regno <= LAST_ARM_REGNUM)
11431 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11433 if ( regno == FRAME_POINTER_REGNUM
11434 || regno == ARG_POINTER_REGNUM)
11435 /* We only allow integers in the fake hard registers. */
11436 return GET_MODE_CLASS (mode) == MODE_INT;
11438 /* The only registers left are the FPA registers
11439 which we only allow to hold FP values. */
11440 return GET_MODE_CLASS (mode) == MODE_FLOAT
11441 && regno >= FIRST_FPA_REGNUM
11442 && regno <= LAST_FPA_REGNUM;
11446 arm_regno_class (int regno)
11448 if (TARGET_THUMB)
11450 if (regno == STACK_POINTER_REGNUM)
11451 return STACK_REG;
11452 if (regno == CC_REGNUM)
11453 return CC_REG;
11454 if (regno < 8)
11455 return LO_REGS;
11456 return HI_REGS;
11459 if ( regno <= LAST_ARM_REGNUM
11460 || regno == FRAME_POINTER_REGNUM
11461 || regno == ARG_POINTER_REGNUM)
11462 return GENERAL_REGS;
11464 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11465 return NO_REGS;
11467 if (IS_CIRRUS_REGNUM (regno))
11468 return CIRRUS_REGS;
11470 if (IS_VFP_REGNUM (regno))
11471 return VFP_REGS;
11473 if (IS_IWMMXT_REGNUM (regno))
11474 return IWMMXT_REGS;
11476 if (IS_IWMMXT_GR_REGNUM (regno))
11477 return IWMMXT_GR_REGS;
11479 return FPA_REGS;
11482 /* Handle a special case when computing the offset
11483 of an argument from the frame pointer. */
11485 arm_debugger_arg_offset (int value, rtx addr)
11487 rtx insn;
11489 /* We are only interested if dbxout_parms() failed to compute the offset. */
11490 if (value != 0)
11491 return 0;
11493 /* We can only cope with the case where the address is held in a register. */
11494 if (GET_CODE (addr) != REG)
11495 return 0;
11497 /* If we are using the frame pointer to point at the argument, then
11498 an offset of 0 is correct. */
11499 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11500 return 0;
11502 /* If we are using the stack pointer to point at the
11503 argument, then an offset of 0 is correct. */
11504 if ((TARGET_THUMB || !frame_pointer_needed)
11505 && REGNO (addr) == SP_REGNUM)
11506 return 0;
11508 /* Oh dear. The argument is pointed to by a register rather
11509 than being held in a register, or being stored at a known
11510 offset from the frame pointer. Since GDB only understands
11511 those two kinds of argument we must translate the address
11512 held in the register into an offset from the frame pointer.
11513 We do this by searching through the insns for the function
11514 looking to see where this register gets its value. If the
11515 register is initialized from the frame pointer plus an offset
11516 then we are in luck and we can continue, otherwise we give up.
11518 This code is exercised by producing debugging information
11519 for a function with arguments like this:
11521 double func (double a, double b, int c, double d) {return d;}
11523 Without this code the stab for parameter 'd' will be set to
11524 an offset of 0 from the frame pointer, rather than 8. */
11526 /* The if() statement says:
11528 If the insn is a normal instruction
11529 and if the insn is setting the value in a register
11530 and if the register being set is the register holding the address of the argument
11531        and if the address is computed by an addition
11532 that involves adding to a register
11533 which is the frame pointer
11534 a constant integer
11536 then... */
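  /* Illustrative example (not in the original sources): an insn of the shape

	(set (reg:SI Rn) (plus:SI (reg:SI 11) (const_int 8)))

     where r11 is the ARM hard frame pointer and Rn is the register holding
     the argument's address, would satisfy the test below and yield an
     offset of 8.  */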
11538 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11540 if ( GET_CODE (insn) == INSN
11541 && GET_CODE (PATTERN (insn)) == SET
11542 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11543 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11544 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11545 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11546 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11549 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11551 break;
11555 if (value == 0)
11557 debug_rtx (addr);
11558 warning (0, "unable to compute real location of stacked parameter");
11559 value = 8; /* XXX magic hack */
11562 return value;
11565 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11566 do \
11568 if ((MASK) & insn_flags) \
11569 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11570 BUILT_IN_MD, NULL, NULL_TREE); \
11572 while (0)
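/* Illustrative expansion (derived from the macro above, added for clarity):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
		   ARM_BUILTIN_WZERO);

   registers __builtin_arm_wzero with the front end only when the selected
   CPU's insn_flags include FL_IWMMXT.  */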
11574 struct builtin_description
11576 const unsigned int mask;
11577 const enum insn_code icode;
11578 const char * const name;
11579 const enum arm_builtins code;
11580 const enum rtx_code comparison;
11581 const unsigned int flag;
11584 static const struct builtin_description bdesc_2arg[] =
11586 #define IWMMXT_BUILTIN(code, string, builtin) \
11587 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11588 ARM_BUILTIN_##builtin, 0, 0 },
11590 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11591 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11592 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11593 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11594 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11595 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11596 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11597 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11598 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11599 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11600 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11601 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11602 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11603 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11604 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11605 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11606 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11607 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11608 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11609 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11610 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11611 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11612 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11613 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11614 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11615 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11616 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11617 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11618 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11619 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11620 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11621 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11622 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11623 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11624 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11625 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11626 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11627 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11628 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11629 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11630 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11631 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11632 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11633 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11634 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11635 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11636 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11637 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11638 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11639 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11640 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11641 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11642 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11643 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11644 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11645 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11646 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11647 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11649 #define IWMMXT_BUILTIN2(code, builtin) \
11650 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11652 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11653 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11654 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11655 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11656 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11657 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11658 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11659 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11660 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11661 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11662 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11663 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11664 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11665 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11666 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11667 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11668 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11669 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11670 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11671 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11672 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11673 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11674 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11675 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11676 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11677 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11678 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11679 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11680 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11681 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11682 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11683 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11686 static const struct builtin_description bdesc_1arg[] =
11688 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11689 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11690 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11691 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11692 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11693 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11694 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11695 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11696 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11697 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11698 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11699 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11700 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11701 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11702 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11703 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11704 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11705 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11708 /* Set up all the iWMMXt builtins. This is
11709 not called if TARGET_IWMMXT is zero. */
11711 static void
11712 arm_init_iwmmxt_builtins (void)
11714 const struct builtin_description * d;
11715 size_t i;
11716 tree endlink = void_list_node;
11718 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11719 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11720 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11722 tree int_ftype_int
11723 = build_function_type (integer_type_node,
11724 tree_cons (NULL_TREE, integer_type_node, endlink));
11725 tree v8qi_ftype_v8qi_v8qi_int
11726 = build_function_type (V8QI_type_node,
11727 tree_cons (NULL_TREE, V8QI_type_node,
11728 tree_cons (NULL_TREE, V8QI_type_node,
11729 tree_cons (NULL_TREE,
11730 integer_type_node,
11731 endlink))));
11732 tree v4hi_ftype_v4hi_int
11733 = build_function_type (V4HI_type_node,
11734 tree_cons (NULL_TREE, V4HI_type_node,
11735 tree_cons (NULL_TREE, integer_type_node,
11736 endlink)));
11737 tree v2si_ftype_v2si_int
11738 = build_function_type (V2SI_type_node,
11739 tree_cons (NULL_TREE, V2SI_type_node,
11740 tree_cons (NULL_TREE, integer_type_node,
11741 endlink)));
11742 tree v2si_ftype_di_di
11743 = build_function_type (V2SI_type_node,
11744 tree_cons (NULL_TREE, long_long_integer_type_node,
11745 tree_cons (NULL_TREE, long_long_integer_type_node,
11746 endlink)));
11747 tree di_ftype_di_int
11748 = build_function_type (long_long_integer_type_node,
11749 tree_cons (NULL_TREE, long_long_integer_type_node,
11750 tree_cons (NULL_TREE, integer_type_node,
11751 endlink)));
11752 tree di_ftype_di_int_int
11753 = build_function_type (long_long_integer_type_node,
11754 tree_cons (NULL_TREE, long_long_integer_type_node,
11755 tree_cons (NULL_TREE, integer_type_node,
11756 tree_cons (NULL_TREE,
11757 integer_type_node,
11758 endlink))));
11759 tree int_ftype_v8qi
11760 = build_function_type (integer_type_node,
11761 tree_cons (NULL_TREE, V8QI_type_node,
11762 endlink));
11763 tree int_ftype_v4hi
11764 = build_function_type (integer_type_node,
11765 tree_cons (NULL_TREE, V4HI_type_node,
11766 endlink));
11767 tree int_ftype_v2si
11768 = build_function_type (integer_type_node,
11769 tree_cons (NULL_TREE, V2SI_type_node,
11770 endlink));
11771 tree int_ftype_v8qi_int
11772 = build_function_type (integer_type_node,
11773 tree_cons (NULL_TREE, V8QI_type_node,
11774 tree_cons (NULL_TREE, integer_type_node,
11775 endlink)));
11776 tree int_ftype_v4hi_int
11777 = build_function_type (integer_type_node,
11778 tree_cons (NULL_TREE, V4HI_type_node,
11779 tree_cons (NULL_TREE, integer_type_node,
11780 endlink)));
11781 tree int_ftype_v2si_int
11782 = build_function_type (integer_type_node,
11783 tree_cons (NULL_TREE, V2SI_type_node,
11784 tree_cons (NULL_TREE, integer_type_node,
11785 endlink)));
11786 tree v8qi_ftype_v8qi_int_int
11787 = build_function_type (V8QI_type_node,
11788 tree_cons (NULL_TREE, V8QI_type_node,
11789 tree_cons (NULL_TREE, integer_type_node,
11790 tree_cons (NULL_TREE,
11791 integer_type_node,
11792 endlink))));
11793 tree v4hi_ftype_v4hi_int_int
11794 = build_function_type (V4HI_type_node,
11795 tree_cons (NULL_TREE, V4HI_type_node,
11796 tree_cons (NULL_TREE, integer_type_node,
11797 tree_cons (NULL_TREE,
11798 integer_type_node,
11799 endlink))));
11800 tree v2si_ftype_v2si_int_int
11801 = build_function_type (V2SI_type_node,
11802 tree_cons (NULL_TREE, V2SI_type_node,
11803 tree_cons (NULL_TREE, integer_type_node,
11804 tree_cons (NULL_TREE,
11805 integer_type_node,
11806 endlink))));
11807 /* Miscellaneous. */
11808 tree v8qi_ftype_v4hi_v4hi
11809 = build_function_type (V8QI_type_node,
11810 tree_cons (NULL_TREE, V4HI_type_node,
11811 tree_cons (NULL_TREE, V4HI_type_node,
11812 endlink)));
11813 tree v4hi_ftype_v2si_v2si
11814 = build_function_type (V4HI_type_node,
11815 tree_cons (NULL_TREE, V2SI_type_node,
11816 tree_cons (NULL_TREE, V2SI_type_node,
11817 endlink)));
11818 tree v2si_ftype_v4hi_v4hi
11819 = build_function_type (V2SI_type_node,
11820 tree_cons (NULL_TREE, V4HI_type_node,
11821 tree_cons (NULL_TREE, V4HI_type_node,
11822 endlink)));
11823 tree v2si_ftype_v8qi_v8qi
11824 = build_function_type (V2SI_type_node,
11825 tree_cons (NULL_TREE, V8QI_type_node,
11826 tree_cons (NULL_TREE, V8QI_type_node,
11827 endlink)));
11828 tree v4hi_ftype_v4hi_di
11829 = build_function_type (V4HI_type_node,
11830 tree_cons (NULL_TREE, V4HI_type_node,
11831 tree_cons (NULL_TREE,
11832 long_long_integer_type_node,
11833 endlink)));
11834 tree v2si_ftype_v2si_di
11835 = build_function_type (V2SI_type_node,
11836 tree_cons (NULL_TREE, V2SI_type_node,
11837 tree_cons (NULL_TREE,
11838 long_long_integer_type_node,
11839 endlink)));
11840 tree void_ftype_int_int
11841 = build_function_type (void_type_node,
11842 tree_cons (NULL_TREE, integer_type_node,
11843 tree_cons (NULL_TREE, integer_type_node,
11844 endlink)));
11845 tree di_ftype_void
11846 = build_function_type (long_long_unsigned_type_node, endlink);
11847 tree di_ftype_v8qi
11848 = build_function_type (long_long_integer_type_node,
11849 tree_cons (NULL_TREE, V8QI_type_node,
11850 endlink));
11851 tree di_ftype_v4hi
11852 = build_function_type (long_long_integer_type_node,
11853 tree_cons (NULL_TREE, V4HI_type_node,
11854 endlink));
11855 tree di_ftype_v2si
11856 = build_function_type (long_long_integer_type_node,
11857 tree_cons (NULL_TREE, V2SI_type_node,
11858 endlink));
11859 tree v2si_ftype_v4hi
11860 = build_function_type (V2SI_type_node,
11861 tree_cons (NULL_TREE, V4HI_type_node,
11862 endlink));
11863 tree v4hi_ftype_v8qi
11864 = build_function_type (V4HI_type_node,
11865 tree_cons (NULL_TREE, V8QI_type_node,
11866 endlink));
11868 tree di_ftype_di_v4hi_v4hi
11869 = build_function_type (long_long_unsigned_type_node,
11870 tree_cons (NULL_TREE,
11871 long_long_unsigned_type_node,
11872 tree_cons (NULL_TREE, V4HI_type_node,
11873 tree_cons (NULL_TREE,
11874 V4HI_type_node,
11875 endlink))));
11877 tree di_ftype_v4hi_v4hi
11878 = build_function_type (long_long_unsigned_type_node,
11879 tree_cons (NULL_TREE, V4HI_type_node,
11880 tree_cons (NULL_TREE, V4HI_type_node,
11881 endlink)));
11883 /* Normal vector binops. */
11884 tree v8qi_ftype_v8qi_v8qi
11885 = build_function_type (V8QI_type_node,
11886 tree_cons (NULL_TREE, V8QI_type_node,
11887 tree_cons (NULL_TREE, V8QI_type_node,
11888 endlink)));
11889 tree v4hi_ftype_v4hi_v4hi
11890 = build_function_type (V4HI_type_node,
11891 tree_cons (NULL_TREE, V4HI_type_node,
11892 tree_cons (NULL_TREE, V4HI_type_node,
11893 endlink)));
11894 tree v2si_ftype_v2si_v2si
11895 = build_function_type (V2SI_type_node,
11896 tree_cons (NULL_TREE, V2SI_type_node,
11897 tree_cons (NULL_TREE, V2SI_type_node,
11898 endlink)));
11899 tree di_ftype_di_di
11900 = build_function_type (long_long_unsigned_type_node,
11901 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11902 tree_cons (NULL_TREE,
11903 long_long_unsigned_type_node,
11904 endlink)));
11906 /* Add all builtins that are more or less simple operations on two
11907 operands. */
11908 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11910 /* Use one of the operands; the target can have a different mode for
11911 mask-generating compares. */
11912 enum machine_mode mode;
11913 tree type;
11915 if (d->name == 0)
11916 continue;
11918 mode = insn_data[d->icode].operand[1].mode;
11920 switch (mode)
11922 case V8QImode:
11923 type = v8qi_ftype_v8qi_v8qi;
11924 break;
11925 case V4HImode:
11926 type = v4hi_ftype_v4hi_v4hi;
11927 break;
11928 case V2SImode:
11929 type = v2si_ftype_v2si_v2si;
11930 break;
11931 case DImode:
11932 type = di_ftype_di_di;
11933 break;
11935 default:
11936 gcc_unreachable ();
11939 def_mbuiltin (d->mask, d->name, type, d->code);
11942 /* Add the remaining MMX insns with somewhat more complicated types. */
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11951 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12034 static void
12035 arm_init_builtins (void)
12037 if (TARGET_REALLY_IWMMXT)
12038 arm_init_iwmmxt_builtins ();
12041 /* Errors in the source file can cause expand_expr to return const0_rtx
12042 where we expect a vector. To avoid crashing, use one of the vector
12043 clear instructions. */
12045 static rtx
12046 safe_vector_operand (rtx x, enum machine_mode mode)
12048 if (x != const0_rtx)
12049 return x;
12050 x = gen_reg_rtx (mode);
12052 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12053 : gen_rtx_SUBREG (DImode, x, 0)));
12054 return x;
12057 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12059 static rtx
12060 arm_expand_binop_builtin (enum insn_code icode,
12061 tree arglist, rtx target)
12063 rtx pat;
12064 tree arg0 = TREE_VALUE (arglist);
12065 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12066 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12067 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12069 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12070 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12072 if (VECTOR_MODE_P (mode0))
12073 op0 = safe_vector_operand (op0, mode0);
12074 if (VECTOR_MODE_P (mode1))
12075 op1 = safe_vector_operand (op1, mode1);
12077 if (! target
12078 || GET_MODE (target) != tmode
12079 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12080 target = gen_reg_rtx (tmode);
12082 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
12084 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12085 op0 = copy_to_mode_reg (mode0, op0);
12086 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12087 op1 = copy_to_mode_reg (mode1, op1);
12089 pat = GEN_FCN (icode) (target, op0, op1);
12090 if (! pat)
12091 return 0;
12092 emit_insn (pat);
12093 return target;
12096 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12098 static rtx
12099 arm_expand_unop_builtin (enum insn_code icode,
12100 tree arglist, rtx target, int do_load)
12102 rtx pat;
12103 tree arg0 = TREE_VALUE (arglist);
12104 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12105 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12106 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12108 if (! target
12109 || GET_MODE (target) != tmode
12110 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12111 target = gen_reg_rtx (tmode);
12112 if (do_load)
12113 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12114 else
12116 if (VECTOR_MODE_P (mode0))
12117 op0 = safe_vector_operand (op0, mode0);
12119 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12120 op0 = copy_to_mode_reg (mode0, op0);
12123 pat = GEN_FCN (icode) (target, op0);
12124 if (! pat)
12125 return 0;
12126 emit_insn (pat);
12127 return target;
12130 /* Expand an expression EXP that calls a built-in function,
12131 with result going to TARGET if that's convenient
12132 (and in mode MODE if that's convenient).
12133 SUBTARGET may be used as the target for computing one of EXP's operands.
12134 IGNORE is nonzero if the value is to be ignored. */
12136 static rtx
12137 arm_expand_builtin (tree exp,
12138 rtx target,
12139 rtx subtarget ATTRIBUTE_UNUSED,
12140 enum machine_mode mode ATTRIBUTE_UNUSED,
12141 int ignore ATTRIBUTE_UNUSED)
12143 const struct builtin_description * d;
12144 enum insn_code icode;
12145 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12146 tree arglist = TREE_OPERAND (exp, 1);
12147 tree arg0;
12148 tree arg1;
12149 tree arg2;
12150 rtx op0;
12151 rtx op1;
12152 rtx op2;
12153 rtx pat;
12154 int fcode = DECL_FUNCTION_CODE (fndecl);
12155 size_t i;
12156 enum machine_mode tmode;
12157 enum machine_mode mode0;
12158 enum machine_mode mode1;
12159 enum machine_mode mode2;
12161 switch (fcode)
12163 case ARM_BUILTIN_TEXTRMSB:
12164 case ARM_BUILTIN_TEXTRMUB:
12165 case ARM_BUILTIN_TEXTRMSH:
12166 case ARM_BUILTIN_TEXTRMUH:
12167 case ARM_BUILTIN_TEXTRMSW:
12168 case ARM_BUILTIN_TEXTRMUW:
12169 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12170 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12171 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12172 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12173 : CODE_FOR_iwmmxt_textrmw);
12175 arg0 = TREE_VALUE (arglist);
12176 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12177 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12178 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12179 tmode = insn_data[icode].operand[0].mode;
12180 mode0 = insn_data[icode].operand[1].mode;
12181 mode1 = insn_data[icode].operand[2].mode;
12183 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12184 op0 = copy_to_mode_reg (mode0, op0);
12185 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12187 /* @@@ better error message */
12188 error ("selector must be an immediate");
12189 return gen_reg_rtx (tmode);
12191 if (target == 0
12192 || GET_MODE (target) != tmode
12193 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12194 target = gen_reg_rtx (tmode);
12195 pat = GEN_FCN (icode) (target, op0, op1);
12196 if (! pat)
12197 return 0;
12198 emit_insn (pat);
12199 return target;
12201 case ARM_BUILTIN_TINSRB:
12202 case ARM_BUILTIN_TINSRH:
12203 case ARM_BUILTIN_TINSRW:
12204 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12205 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12206 : CODE_FOR_iwmmxt_tinsrw);
12207 arg0 = TREE_VALUE (arglist);
12208 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12209 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12210 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12211 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12212 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12213 tmode = insn_data[icode].operand[0].mode;
12214 mode0 = insn_data[icode].operand[1].mode;
12215 mode1 = insn_data[icode].operand[2].mode;
12216 mode2 = insn_data[icode].operand[3].mode;
12218 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12219 op0 = copy_to_mode_reg (mode0, op0);
12220 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12221 op1 = copy_to_mode_reg (mode1, op1);
12222 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12224 /* @@@ better error message */
12225 error ("selector must be an immediate");
12226 return const0_rtx;
12228 if (target == 0
12229 || GET_MODE (target) != tmode
12230 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12231 target = gen_reg_rtx (tmode);
12232 pat = GEN_FCN (icode) (target, op0, op1, op2);
12233 if (! pat)
12234 return 0;
12235 emit_insn (pat);
12236 return target;
12238 case ARM_BUILTIN_SETWCX:
12239 arg0 = TREE_VALUE (arglist);
12240 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12241 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12242 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12243 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12244 return 0;
12246 case ARM_BUILTIN_GETWCX:
12247 arg0 = TREE_VALUE (arglist);
12248 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12249 target = gen_reg_rtx (SImode);
12250 emit_insn (gen_iwmmxt_tmrc (target, op0));
12251 return target;
12253 case ARM_BUILTIN_WSHUFH:
12254 icode = CODE_FOR_iwmmxt_wshufh;
12255 arg0 = TREE_VALUE (arglist);
12256 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12257 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12258 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12259 tmode = insn_data[icode].operand[0].mode;
12260 mode1 = insn_data[icode].operand[1].mode;
12261 mode2 = insn_data[icode].operand[2].mode;
12263 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12264 op0 = copy_to_mode_reg (mode1, op0);
12265 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12267 /* @@@ better error message */
12268 error ("mask must be an immediate");
12269 return const0_rtx;
12271 if (target == 0
12272 || GET_MODE (target) != tmode
12273 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12274 target = gen_reg_rtx (tmode);
12275 pat = GEN_FCN (icode) (target, op0, op1);
12276 if (! pat)
12277 return 0;
12278 emit_insn (pat);
12279 return target;
12281 case ARM_BUILTIN_WSADB:
12282 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12283 case ARM_BUILTIN_WSADH:
12284 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12285 case ARM_BUILTIN_WSADBZ:
12286 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12287 case ARM_BUILTIN_WSADHZ:
12288 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12290 /* Several three-argument builtins. */
12291 case ARM_BUILTIN_WMACS:
12292 case ARM_BUILTIN_WMACU:
12293 case ARM_BUILTIN_WALIGN:
12294 case ARM_BUILTIN_TMIA:
12295 case ARM_BUILTIN_TMIAPH:
12296 case ARM_BUILTIN_TMIATT:
12297 case ARM_BUILTIN_TMIATB:
12298 case ARM_BUILTIN_TMIABT:
12299 case ARM_BUILTIN_TMIABB:
12300 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12301 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12302 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12303 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12304 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12305 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12306 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12307 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12308 : CODE_FOR_iwmmxt_walign);
12309 arg0 = TREE_VALUE (arglist);
12310 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12311 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12312 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12313 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12314 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12315 tmode = insn_data[icode].operand[0].mode;
12316 mode0 = insn_data[icode].operand[1].mode;
12317 mode1 = insn_data[icode].operand[2].mode;
12318 mode2 = insn_data[icode].operand[3].mode;
12320 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12321 op0 = copy_to_mode_reg (mode0, op0);
12322 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12323 op1 = copy_to_mode_reg (mode1, op1);
12324 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12325 op2 = copy_to_mode_reg (mode2, op2);
12326 if (target == 0
12327 || GET_MODE (target) != tmode
12328 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12329 target = gen_reg_rtx (tmode);
12330 pat = GEN_FCN (icode) (target, op0, op1, op2);
12331 if (! pat)
12332 return 0;
12333 emit_insn (pat);
12334 return target;
12336 case ARM_BUILTIN_WZERO:
12337 target = gen_reg_rtx (DImode);
12338 emit_insn (gen_iwmmxt_clrdi (target));
12339 return target;
12341 default:
12342 break;
12345 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12346 if (d->code == (const enum arm_builtins) fcode)
12347 return arm_expand_binop_builtin (d->icode, arglist, target);
12349 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12350 if (d->code == (const enum arm_builtins) fcode)
12351 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12353 /* @@@ Should really do something sensible here. */
12354 return NULL_RTX;
12357 /* Return the number (counting from 0) of
12358 the least significant set bit in MASK. */
12360 inline static int
12361 number_of_first_bit_set (unsigned mask)
12363 int bit;
12365 for (bit = 0;
12366 (mask & (1 << bit)) == 0;
12367 ++bit)
12368 continue;
12370 return bit;
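/* Worked example (illustrative): for MASK == 0x28 (binary 101000) the loop
   exits with BIT == 3, since bit 3 is the lowest bit set.  */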
12373 /* Emit code to push or pop registers to or from the stack. F is the
12374 assembly file. MASK is the registers to push or pop. PUSH is
12375 nonzero if we should push, and zero if we should pop. For debugging
12376 output, if pushing, adjust CFA_OFFSET by the amount of space added
12377 to the stack. REAL_REGS should have the same number of bits set as
12378 MASK, and will be used instead (in the same order) to describe which
12379 registers were saved - this is used to mark the save slots when we
12380 push high registers after moving them to low registers. */
12381 static void
12382 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
12383 unsigned long real_regs)
12385 int regno;
12386 int lo_mask = mask & 0xFF;
12387 int pushed_words = 0;
12389 gcc_assert (mask);
12391 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12393 /* Special case. Do not generate a POP PC statement here, do it in
12394 thumb_exit() */
12395 thumb_exit (f, -1);
12396 return;
12399 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12401 /* Look at the low registers first. */
12402 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12404 if (lo_mask & 1)
12406 asm_fprintf (f, "%r", regno);
12408 if ((lo_mask & ~1) != 0)
12409 fprintf (f, ", ");
12411 pushed_words++;
12415 if (push && (mask & (1 << LR_REGNUM)))
12417 /* Catch pushing the LR. */
12418 if (mask & 0xFF)
12419 fprintf (f, ", ");
12421 asm_fprintf (f, "%r", LR_REGNUM);
12423 pushed_words++;
12425 else if (!push && (mask & (1 << PC_REGNUM)))
12427 /* Catch popping the PC. */
12428 if (TARGET_INTERWORK || TARGET_BACKTRACE
12429 || current_function_calls_eh_return)
12431 /* The PC is never popped directly; instead
12432 it is popped into r3 and then BX is used. */
12433 fprintf (f, "}\n");
12435 thumb_exit (f, -1);
12437 return;
12439 else
12441 if (mask & 0xFF)
12442 fprintf (f, ", ");
12444 asm_fprintf (f, "%r", PC_REGNUM);
12448 fprintf (f, "}\n");
12450 if (push && pushed_words && dwarf2out_do_frame ())
12452 char *l = dwarf2out_cfi_label ();
12453 int pushed_mask = real_regs;
12455 *cfa_offset += pushed_words * 4;
12456 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12458 pushed_words = 0;
12459 pushed_mask = real_regs;
12460 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12462 if (pushed_mask & 1)
12463 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
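/* Example (a sketch): with PUSH nonzero and MASK == 0x4083 the bits for r0,
   r1, r7 and lr are set, so this emits

       push    {r0, r1, r7, lr}

   and, when dwarf2 frame information is requested, advances *CFA_OFFSET by
   16 (four words) and records a save slot for each register in REAL_REGS.  */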
12468 /* Generate code to return from a thumb function.
12469 If 'reg_containing_return_addr' is -1, then the return address is
12470 actually on the stack, at the stack pointer. */
12471 static void
12472 thumb_exit (FILE *f, int reg_containing_return_addr)
12474 unsigned regs_available_for_popping;
12475 unsigned regs_to_pop;
12476 int pops_needed;
12477 unsigned available;
12478 unsigned required;
12479 int mode;
12480 int size;
12481 int restore_a4 = FALSE;
12483 /* Compute the registers we need to pop. */
12484 regs_to_pop = 0;
12485 pops_needed = 0;
12487 if (reg_containing_return_addr == -1)
12489 regs_to_pop |= 1 << LR_REGNUM;
12490 ++pops_needed;
12493 if (TARGET_BACKTRACE)
12495 /* Restore the (ARM) frame pointer and stack pointer. */
12496 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12497 pops_needed += 2;
12500 /* If there is nothing to pop then just emit the BX instruction and
12501 return. */
12502 if (pops_needed == 0)
12504 if (current_function_calls_eh_return)
12505 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12507 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12508 return;
12510 /* Otherwise if we are not supporting interworking and we have not created
12511 a backtrace structure and the function was not entered in ARM mode then
12512 just pop the return address straight into the PC. */
12513 else if (!TARGET_INTERWORK
12514 && !TARGET_BACKTRACE
12515 && !is_called_in_ARM_mode (current_function_decl)
12516 && !current_function_calls_eh_return)
12518 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12519 return;
12522 /* Find out how many of the (return) argument registers we can corrupt. */
12523 regs_available_for_popping = 0;
12525 /* If returning via __builtin_eh_return, the bottom three registers
12526 all contain information needed for the return. */
12527 if (current_function_calls_eh_return)
12528 size = 12;
12529 else
12531 /* We can deduce the registers used from the function's
12532 return value. This is more reliable than examining
12533 regs_ever_live[] because that will be set if the register is
12534 ever used in the function, not just if the register is used
12535 to hold a return value. */
12537 if (current_function_return_rtx != 0)
12538 mode = GET_MODE (current_function_return_rtx);
12539 else
12540 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12542 size = GET_MODE_SIZE (mode);
12544 if (size == 0)
12546 /* In a void function we can use any argument register.
12547 In a function that returns a structure on the stack
12548 we can use the second and third argument registers. */
12549 if (mode == VOIDmode)
12550 regs_available_for_popping =
12551 (1 << ARG_REGISTER (1))
12552 | (1 << ARG_REGISTER (2))
12553 | (1 << ARG_REGISTER (3));
12554 else
12555 regs_available_for_popping =
12556 (1 << ARG_REGISTER (2))
12557 | (1 << ARG_REGISTER (3));
12559 else if (size <= 4)
12560 regs_available_for_popping =
12561 (1 << ARG_REGISTER (2))
12562 | (1 << ARG_REGISTER (3));
12563 else if (size <= 8)
12564 regs_available_for_popping =
12565 (1 << ARG_REGISTER (3));
12568 /* Match registers to be popped with registers into which we pop them. */
12569 for (available = regs_available_for_popping,
12570 required = regs_to_pop;
12571 required != 0 && available != 0;
12572 available &= ~(available & - available),
12573 required &= ~(required & - required))
12574 -- pops_needed;
12576 /* If we have any popping registers left over, remove them. */
12577 if (available > 0)
12578 regs_available_for_popping &= ~available;
12580 /* Otherwise if we need another popping register we can use
12581 the fourth argument register. */
12582 else if (pops_needed)
12584 /* If we have not found any free argument registers and
12585 reg a4 contains the return address, we must move it. */
12586 if (regs_available_for_popping == 0
12587 && reg_containing_return_addr == LAST_ARG_REGNUM)
12589 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12590 reg_containing_return_addr = LR_REGNUM;
12592 else if (size > 12)
12594 /* Register a4 is being used to hold part of the return value,
12595 but we have dire need of a free, low register. */
12596 restore_a4 = TRUE;
12598 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12601 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12603 /* The fourth argument register is available. */
12604 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12606 --pops_needed;
12610 /* Pop as many registers as we can. */
12611 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12612 regs_available_for_popping);
12614 /* Process the registers we popped. */
12615 if (reg_containing_return_addr == -1)
12617 /* The return address was popped into the lowest numbered register. */
12618 regs_to_pop &= ~(1 << LR_REGNUM);
12620 reg_containing_return_addr =
12621 number_of_first_bit_set (regs_available_for_popping);
12623 /* Remove this register from the mask of available registers, so that
12624 the return address will not be corrupted by further pops. */
12625 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12628 /* If we popped other registers then handle them here. */
12629 if (regs_available_for_popping)
12631 int frame_pointer;
12633 /* Work out which register currently contains the frame pointer. */
12634 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12636 /* Move it into the correct place. */
12637 asm_fprintf (f, "\tmov\t%r, %r\n",
12638 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12640 /* (Temporarily) remove it from the mask of popped registers. */
12641 regs_available_for_popping &= ~(1 << frame_pointer);
12642 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12644 if (regs_available_for_popping)
12646 int stack_pointer;
12648 /* We popped the stack pointer as well;
12649 find the register that contains it. */
12650 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12652 /* Move it into the stack register. */
12653 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12655 /* At this point we have popped all necessary registers, so
12656 do not worry about restoring regs_available_for_popping
12657 to its correct value:
12659 assert (pops_needed == 0)
12660 assert (regs_available_for_popping == (1 << frame_pointer))
12661 assert (regs_to_pop == (1 << STACK_POINTER)) */
12663 else
12665 /* Since we have just moved the popped value into the frame
12666 pointer, the popping register is available for reuse, and
12667 we know that we still have the stack pointer left to pop. */
12668 regs_available_for_popping |= (1 << frame_pointer);
12672 /* If we still have registers left on the stack, but we no longer have
12673 any registers into which we can pop them, then we must move the return
12674 address into the link register and make available the register that
12675 contained it. */
12676 if (regs_available_for_popping == 0 && pops_needed > 0)
12678 regs_available_for_popping |= 1 << reg_containing_return_addr;
12680 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12681 reg_containing_return_addr);
12683 reg_containing_return_addr = LR_REGNUM;
12686 /* If we have registers left on the stack then pop some more.
12687 We know that at most we will want to pop FP and SP. */
12688 if (pops_needed > 0)
12690 int popped_into;
12691 int move_to;
12693 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12694 regs_available_for_popping);
12696 /* We have popped either FP or SP.
12697 Move whichever one it is into the correct register. */
12698 popped_into = number_of_first_bit_set (regs_available_for_popping);
12699 move_to = number_of_first_bit_set (regs_to_pop);
12701 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12703 regs_to_pop &= ~(1 << move_to);
12705 --pops_needed;
12708 /* If we still have not popped everything then we must have only
12709 had one register available to us and we are now popping the SP. */
12710 if (pops_needed > 0)
12712 int popped_into;
12714 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12715 regs_available_for_popping);
12717 popped_into = number_of_first_bit_set (regs_available_for_popping);
12719 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12720 /*
12721 assert (regs_to_pop == (1 << STACK_POINTER))
12722 assert (pops_needed == 1)
12723 */
12726 /* If necessary restore the a4 register. */
12727 if (restore_a4)
12729 if (reg_containing_return_addr != LR_REGNUM)
12731 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12732 reg_containing_return_addr = LR_REGNUM;
12735 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12738 if (current_function_calls_eh_return)
12739 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12741 /* Return to caller. */
12742 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
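/* Typical output (sketches only; the exact register chosen depends on how
   much of r0-r3 the return value occupies):

       pop     {pc}             return address on the stack, no interworking

       pop     {r1}
       bx      r1               interworking return for a function returning
                                a single word  */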
12746 void
12747 thumb_final_prescan_insn (rtx insn)
12749 if (flag_print_asm_name)
12750 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12751 INSN_ADDRESSES (INSN_UID (insn)));
12754 int
12755 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12757 unsigned HOST_WIDE_INT mask = 0xff;
12758 int i;
12760 if (val == 0) /* XXX */
12761 return 0;
12763 for (i = 0; i < 25; i++)
12764 if ((val & (mask << i)) == val)
12765 return 1;
12767 return 0;
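/* Worked example (illustrative): 0x00ff0000 is 0xff shifted left by 16, so
   the loop matches at i == 16 and the function returns 1; a value such as
   0x00ff00ff does not fit in any single shifted byte and returns 0.  */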
12770 /* Returns nonzero if the current function contains,
12771 or might contain, a far jump. */
12772 static int
12773 thumb_far_jump_used_p (void)
12775 rtx insn;
12777 /* This test is only important for leaf functions. */
12778 /* assert (!leaf_function_p ()); */
12780 /* If we have already decided that far jumps may be used,
12781 do not bother checking again, and always return true even if
12782 it turns out that they are not being used. Once we have made
12783 the decision that far jumps are present (and that hence the link
12784 register will be pushed onto the stack) we cannot go back on it. */
12785 if (cfun->machine->far_jump_used)
12786 return 1;
12788 /* If this function is not being called from the prologue/epilogue
12789 generation code then it must be being called from the
12790 INITIAL_ELIMINATION_OFFSET macro. */
12791 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12793 /* In this case we know that we are being asked about the elimination
12794 of the arg pointer register. If that register is not being used,
12795 then there are no arguments on the stack, and we do not have to
12796 worry that a far jump might force the prologue to push the link
12797 register, changing the stack offsets. In this case we can just
12798 return false, since the presence of far jumps in the function will
12799 not affect stack offsets.
12801 If the arg pointer is live (or if it was live, but has now been
12802 eliminated and so set to dead) then we do have to test to see if
12803 the function might contain a far jump. This test can lead to some
12804 false negatives, since before reload is completed, the length of
12805 branch instructions is not known, so GCC defaults to returning their
12806 longest length, which in turn sets the far jump attribute to true.
12808 A false negative will not result in bad code being generated, but it
12809 will result in a needless push and pop of the link register. We
12810 hope that this does not occur too often.
12812 If we need doubleword stack alignment this could affect the other
12813 elimination offsets so we can't risk getting it wrong. */
12814 if (regs_ever_live [ARG_POINTER_REGNUM])
12815 cfun->machine->arg_pointer_live = 1;
12816 else if (!cfun->machine->arg_pointer_live)
12817 return 0;
12820 /* Check to see if the function contains a branch
12821 insn with the far jump attribute set. */
12822 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12824 if (GET_CODE (insn) == JUMP_INSN
12825 /* Ignore tablejump patterns. */
12826 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12827 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12828 && get_attr_far_jump (insn) == FAR_JUMP_YES
12831 /* Record the fact that we have decided that
12832 the function does use far jumps. */
12833 cfun->machine->far_jump_used = 1;
12834 return 1;
12838 return 0;
12841 /* Return nonzero if FUNC must be entered in ARM mode. */
12842 int
12843 is_called_in_ARM_mode (tree func)
12845 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
12847 /* Ignore the problem about functions whose address is taken. */
12848 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12849 return TRUE;
12851 #ifdef ARM_PE
12852 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12853 #else
12854 return FALSE;
12855 #endif
12858 /* The bits which aren't usefully expanded as rtl. */
12859 const char *
12860 thumb_unexpanded_epilogue (void)
12862 int regno;
12863 unsigned long live_regs_mask = 0;
12864 int high_regs_pushed = 0;
12865 int had_to_push_lr;
12866 int size;
12867 int mode;
12869 if (return_used_this_function)
12870 return "";
12872 if (IS_NAKED (arm_current_func_type ()))
12873 return "";
12875 live_regs_mask = thumb_compute_save_reg_mask ();
12876 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12878 /* We can deduce the registers used from the function's return value.
12879 This is more reliable than examining regs_ever_live[] because that
12880 will be set if the register is ever used in the function, not just if
12881 the register is used to hold a return value. */
12883 if (current_function_return_rtx != 0)
12884 mode = GET_MODE (current_function_return_rtx);
12885 else
12886 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12888 size = GET_MODE_SIZE (mode);
12890 /* The prolog may have pushed some high registers to use as
12891 work registers. e.g. the testsuite file:
12892 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12893 compiles to produce:
12894 push {r4, r5, r6, r7, lr}
12895 mov r7, r9
12896 mov r6, r8
12897 push {r6, r7}
12898 as part of the prolog. We have to undo that pushing here. */
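/* A sketch of the matching undo sequence generated by the code below in
   this function (assuming the return value leaves r2 and r3 free as
   scratch and no interworking return is required):

       pop     {r2, r3}
       mov     r8, r2
       mov     r9, r3
       pop     {r4, r5, r6, r7, pc}  */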
12900 if (high_regs_pushed)
12902 unsigned long mask = live_regs_mask & 0xff;
12903 int next_hi_reg;
12905 /* The available low registers depend on the size of the value we are
12906 returning. */
12907 if (size <= 12)
12908 mask |= 1 << 3;
12909 if (size <= 8)
12910 mask |= 1 << 2;
12912 if (mask == 0)
12913 /* Oh dear! We have no low registers into which we can pop
12914 high registers! */
12915 internal_error
12916 ("no low registers available for popping high registers");
12918 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12919 if (live_regs_mask & (1 << next_hi_reg))
12920 break;
12922 while (high_regs_pushed)
12924 /* Find lo register(s) into which the high register(s) can
12925 be popped. */
12926 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12928 if (mask & (1 << regno))
12929 high_regs_pushed--;
12930 if (high_regs_pushed == 0)
12931 break;
12934 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12936 /* Pop the values into the low register(s). */
12937 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12939 /* Move the value(s) into the high registers. */
12940 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12942 if (mask & (1 << regno))
12944 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12945 regno);
12947 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12948 if (live_regs_mask & (1 << next_hi_reg))
12949 break;
12953 live_regs_mask &= ~0x0f00;
12956 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12957 live_regs_mask &= 0xff;
12959 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12961 /* Pop the return address into the PC. */
12962 if (had_to_push_lr)
12963 live_regs_mask |= 1 << PC_REGNUM;
12965 /* Either no argument registers were pushed or a backtrace
12966 structure was created which includes an adjusted stack
12967 pointer, so just pop everything. */
12968 if (live_regs_mask)
12969 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12970 live_regs_mask);
12972 /* We have either just popped the return address into the
12973 PC or it was kept in LR for the entire function. */
12974 if (!had_to_push_lr)
12975 thumb_exit (asm_out_file, LR_REGNUM);
12977 else
12979 /* Pop everything but the return address. */
12980 if (live_regs_mask)
12981 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12982 live_regs_mask);
12984 if (had_to_push_lr)
12986 if (size > 12)
12988 /* We have no free low regs, so save one. */
12989 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12990 LAST_ARG_REGNUM);
12993 /* Get the return address into a temporary register. */
12994 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12995 1 << LAST_ARG_REGNUM);
12997 if (size > 12)
12999 /* Move the return address to lr. */
13000 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13001 LAST_ARG_REGNUM);
13002 /* Restore the low register. */
13003 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13004 IP_REGNUM);
13005 regno = LR_REGNUM;
13007 else
13008 regno = LAST_ARG_REGNUM;
13010 else
13011 regno = LR_REGNUM;
13013 /* Remove the argument registers that were pushed onto the stack. */
13014 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13015 SP_REGNUM, SP_REGNUM,
13016 current_function_pretend_args_size);
13018 thumb_exit (asm_out_file, regno);
13021 return "";
13024 /* Functions to save and restore machine-specific function data. */
13025 static struct machine_function *
13026 arm_init_machine_status (void)
13028 struct machine_function *machine;
13029 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13031 #if ARM_FT_UNKNOWN != 0
13032 machine->func_type = ARM_FT_UNKNOWN;
13033 #endif
13034 return machine;
13037 /* Return an RTX indicating where the return address to the
13038 calling function can be found. */
13039 rtx
13040 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13042 if (count != 0)
13043 return NULL_RTX;
13045 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
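/* In user terms (an illustrative note, not compiler code): this backs
   __builtin_return_address, so

       void *caller = __builtin_return_address (0);

   resolves to the value LR held on entry to the current function, while any
   non-zero argument cannot be resolved here (NULL_RTX is returned above).  */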
13048 /* Do anything needed before RTL is emitted for each function. */
13049 void
13050 arm_init_expanders (void)
13052 /* Arrange to initialize and mark the machine per-function status. */
13053 init_machine_status = arm_init_machine_status;
13055 /* This is to stop the combine pass optimizing away the alignment
13056 adjustment of va_arg. */
13057 /* ??? It is claimed that this should not be necessary. */
13058 if (cfun)
13059 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13063 /* Like arm_compute_initial_elimination_offset. Simpler because
13064 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13066 HOST_WIDE_INT
13067 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13069 arm_stack_offsets *offsets;
13071 offsets = arm_get_frame_offsets ();
13073 switch (from)
13075 case ARG_POINTER_REGNUM:
13076 switch (to)
13078 case STACK_POINTER_REGNUM:
13079 return offsets->outgoing_args - offsets->saved_args;
13081 case FRAME_POINTER_REGNUM:
13082 return offsets->soft_frame - offsets->saved_args;
13084 case THUMB_HARD_FRAME_POINTER_REGNUM:
13085 case ARM_HARD_FRAME_POINTER_REGNUM:
13086 return offsets->saved_regs - offsets->saved_args;
13088 default:
13089 gcc_unreachable ();
13091 break;
13093 case FRAME_POINTER_REGNUM:
13094 switch (to)
13096 case STACK_POINTER_REGNUM:
13097 return offsets->outgoing_args - offsets->soft_frame;
13099 case THUMB_HARD_FRAME_POINTER_REGNUM:
13100 case ARM_HARD_FRAME_POINTER_REGNUM:
13101 return offsets->saved_regs - offsets->soft_frame;
13103 default:
13104 gcc_unreachable ();
13106 break;
13108 default:
13109 gcc_unreachable ();
13114 /* Generate the rest of a function's prologue. */
13115 void
13116 thumb_expand_prologue (void)
13118 rtx insn, dwarf;
13120 HOST_WIDE_INT amount;
13121 arm_stack_offsets *offsets;
13122 unsigned long func_type;
13123 int regno;
13124 unsigned long live_regs_mask;
13126 func_type = arm_current_func_type ();
13128 /* Naked functions don't have prologues. */
13129 if (IS_NAKED (func_type))
13130 return;
13132 if (IS_INTERRUPT (func_type))
13134 error ("interrupt Service Routines cannot be coded in Thumb mode");
13135 return;
13138 live_regs_mask = thumb_compute_save_reg_mask ();
13139 /* Load the pic register before setting the frame pointer,
13140 so we can use r7 as a temporary work register. */
13141 if (flag_pic)
13142 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13144 offsets = arm_get_frame_offsets ();
13146 if (frame_pointer_needed)
13148 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13149 stack_pointer_rtx));
13150 RTX_FRAME_RELATED_P (insn) = 1;
13152 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13153 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13154 stack_pointer_rtx);
13156 amount = offsets->outgoing_args - offsets->saved_regs;
13157 if (amount)
13159 if (amount < 512)
13161 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13162 GEN_INT (- amount)));
13163 RTX_FRAME_RELATED_P (insn) = 1;
13165 else
13167 rtx reg;
13169 /* The stack decrement is too big for an immediate value in a single
13170 insn. In theory we could issue multiple subtracts, but after
13171 three of them it becomes more space efficient to place the full
13172 value in the constant pool and load into a register. (Also the
13173 ARM debugger really likes to see only one stack decrement per
13174 function). So instead we look for a scratch register into which
13175 we can load the decrement, and then we subtract this from the
13176 stack pointer. Unfortunately on the thumb the only available
13177 scratch registers are the argument registers, and we cannot use
13178 these as they may hold arguments to the function. Instead we
13179 attempt to locate a call preserved register which is used by this
13180 function. If we can find one, then we know that it will have
13181 been pushed at the start of the prologue and so we can corrupt
13182 it now. */
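/* Sketch of the sequence this produces (the label name is illustrative and
   the exact mnemonics depend on the movsi expansion; the constant itself is
   placed in the literal pool):

       ldr     r4, .LCx        @ r4: a call-saved low register pushed above
       add     sp, sp, r4      @ .LCx holds the negated decrement amount   */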
13183 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13184 if (live_regs_mask & (1 << regno)
13185 && !(frame_pointer_needed
13186 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13187 break;
13189 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13191 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13193 /* Choose an arbitrary, non-argument low register. */
13194 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13196 /* Save it by copying it into a high, scratch register. */
13197 emit_insn (gen_movsi (spare, reg));
13198 /* Add a USE to stop propagate_one_insn() from barfing. */
13199 emit_insn (gen_prologue_use (spare));
13201 /* Decrement the stack. */
13202 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13203 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13204 stack_pointer_rtx, reg));
13205 RTX_FRAME_RELATED_P (insn) = 1;
13206 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13207 plus_constant (stack_pointer_rtx,
13208 -amount));
13209 RTX_FRAME_RELATED_P (dwarf) = 1;
13210 REG_NOTES (insn)
13211 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13212 REG_NOTES (insn));
13214 /* Restore the low register's original value. */
13215 emit_insn (gen_movsi (reg, spare));
13217 /* Emit a USE of the restored scratch register, so that flow
13218 analysis will not consider the restore redundant. The
13219 register won't be used again in this function and isn't
13220 restored by the epilogue. */
13221 emit_insn (gen_prologue_use (reg));
13223 else
13225 reg = gen_rtx_REG (SImode, regno);
13227 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13229 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13230 stack_pointer_rtx, reg));
13231 RTX_FRAME_RELATED_P (insn) = 1;
13232 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13233 plus_constant (stack_pointer_rtx,
13234 -amount));
13235 RTX_FRAME_RELATED_P (dwarf) = 1;
13236 REG_NOTES (insn)
13237 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13238 REG_NOTES (insn));
13241 /* If the frame pointer is needed, emit a special barrier that
13242 will prevent the scheduler from moving stores to the frame
13243 before the stack adjustment. */
13244 if (frame_pointer_needed)
13245 emit_insn (gen_stack_tie (stack_pointer_rtx,
13246 hard_frame_pointer_rtx));
13249 if (current_function_profile || !TARGET_SCHED_PROLOG)
13250 emit_insn (gen_blockage ());
13252 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13253 if (live_regs_mask & 0xff)
13254 cfun->machine->lr_save_eliminated = 0;
13256 /* If the link register is being kept alive, with the return address in it,
13257 then make sure that it does not get reused by the ce2 pass. */
13258 if (cfun->machine->lr_save_eliminated)
13259 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13263 void
13264 thumb_expand_epilogue (void)
13266 HOST_WIDE_INT amount;
13267 arm_stack_offsets *offsets;
13268 int regno;
13270 /* Naked functions don't have epilogues. */
13271 if (IS_NAKED (arm_current_func_type ()))
13272 return;
13274 offsets = arm_get_frame_offsets ();
13275 amount = offsets->outgoing_args - offsets->saved_regs;
13277 if (frame_pointer_needed)
13278 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13279 else if (amount)
13281 if (amount < 512)
13282 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13283 GEN_INT (amount)));
13284 else
13286 /* r3 is always free in the epilogue. */
13287 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13289 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13290 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13294 /* Emit a USE (stack_pointer_rtx), so that
13295 the stack adjustment will not be deleted. */
13296 emit_insn (gen_prologue_use (stack_pointer_rtx));
13298 if (current_function_profile || !TARGET_SCHED_PROLOG)
13299 emit_insn (gen_blockage ());
13301 /* Emit a clobber for each register that will be restored in the epilogue,
13302 so that flow2 will get register lifetimes correct. */
13303 for (regno = 0; regno < 13; regno++)
13304 if (regs_ever_live[regno] && !call_used_regs[regno])
13305 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13307 if (! regs_ever_live[LR_REGNUM])
13308 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13311 static void
13312 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13314 unsigned long live_regs_mask = 0;
13315 unsigned long l_mask;
13316 unsigned high_regs_pushed = 0;
13317 int cfa_offset = 0;
13318 int regno;
13320 if (IS_NAKED (arm_current_func_type ()))
13321 return;
13323 if (is_called_in_ARM_mode (current_function_decl))
13325 const char * name;
13327 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
13328 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
13329 == SYMBOL_REF);
13330 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13332 /* Generate code sequence to switch us into Thumb mode. */
13333 /* The .code 32 directive has already been emitted by
13334 ASM_DECLARE_FUNCTION_NAME. */
13335 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13336 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13338 /* Generate a label, so that the debugger will notice the
13339 change in instruction sets. This label is also used by
13340 the assembler to bypass the ARM code when this function
13341 is called from a Thumb encoded function elsewhere in the
13342 same file. Hence the definition of STUB_NAME here must
13343 agree with the definition in gas/config/tc-arm.c. */
13345 #define STUB_NAME ".real_start_of"
13347 fprintf (f, "\t.code\t16\n");
13348 #ifdef ARM_PE
13349 if (arm_dllexport_name_p (name))
13350 name = arm_strip_name_encoding (name);
13351 #endif
13352 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13353 fprintf (f, "\t.thumb_func\n");
13354 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13357 if (current_function_pretend_args_size)
13359 if (cfun->machine->uses_anonymous_args)
13361 int num_pushes;
13363 fprintf (f, "\tpush\t{");
13365 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13367 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13368 regno <= LAST_ARG_REGNUM;
13369 regno++)
13370 asm_fprintf (f, "%r%s", regno,
13371 regno == LAST_ARG_REGNUM ? "" : ", ");
13373 fprintf (f, "}\n");
13375 else
13376 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13377 SP_REGNUM, SP_REGNUM,
13378 current_function_pretend_args_size);
13380 /* We don't need to record the stores for unwinding (would it
13381 help the debugger any if we did?), but record the change in
13382 the stack pointer. */
13383 if (dwarf2out_do_frame ())
13385 char *l = dwarf2out_cfi_label ();
13387 cfa_offset = cfa_offset + current_function_pretend_args_size;
13388 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13392 /* Get the registers we are going to push. */
13393 live_regs_mask = thumb_compute_save_reg_mask ();
13394 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13395 l_mask = live_regs_mask & 0x40ff;
13396 /* Then count how many other high registers will need to be pushed. */
13397 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13399 if (TARGET_BACKTRACE)
13401 unsigned offset;
13402 unsigned work_register;
13404 /* We have been asked to create a stack backtrace structure.
13405 The code looks like this:
13407 0 .align 2
13408 0 func:
13409 0 sub SP, #16 Reserve space for 4 registers.
13410 2 push {R7} Push low registers.
13411 4 add R7, SP, #20 Get the stack pointer before the push.
13412 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13413 8 mov R7, PC Get hold of the start of this code plus 12.
13414 10 str R7, [SP, #16] Store it.
13415 12 mov R7, FP Get hold of the current frame pointer.
13416 14 str R7, [SP, #4] Store it.
13417 16 mov R7, LR Get hold of the current return address.
13418 18 str R7, [SP, #12] Store it.
13419 20 add R7, SP, #16 Point at the start of the backtrace structure.
13420 22 mov FP, R7 Put this value into the frame pointer. */
13422 work_register = thumb_find_work_register (live_regs_mask);
13424 asm_fprintf
13425 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13426 SP_REGNUM, SP_REGNUM);
13428 if (dwarf2out_do_frame ())
13430 char *l = dwarf2out_cfi_label ();
13432 cfa_offset = cfa_offset + 16;
13433 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13436 if (l_mask)
13438 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13439 offset = bit_count (l_mask);
13441 else
13442 offset = 0;
13444 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13445 offset + 16 + current_function_pretend_args_size);
13447 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13448 offset + 4);
13450 /* Make sure that the instruction fetching the PC is in the right place
13451 to calculate "start of backtrace creation code + 12". */
13452 if (l_mask)
13454 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13455 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13456 offset + 12);
13457 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13458 ARM_HARD_FRAME_POINTER_REGNUM);
13459 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13460 offset);
13462 else
13464 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13465 ARM_HARD_FRAME_POINTER_REGNUM);
13466 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13467 offset);
13468 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13469 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13470 offset + 12);
13473 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13474 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13475 offset + 8);
13476 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13477 offset + 12);
13478 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13479 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13481 /* Optimisation: If we are not pushing any low registers but we are going
13482 to push some high registers then delay our first push. This will just
13483 be a push of LR and we can combine it with the push of the first high
13484 register. */
13485 else if ((l_mask & 0xff) != 0
13486 || (high_regs_pushed == 0 && l_mask))
13487 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13489 if (high_regs_pushed)
13491 unsigned pushable_regs;
13492 unsigned next_hi_reg;
13494 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13495 if (live_regs_mask & (1 << next_hi_reg))
13496 break;
13498 pushable_regs = l_mask & 0xff;
13500 if (pushable_regs == 0)
13501 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13503 while (high_regs_pushed > 0)
13505 unsigned long real_regs_mask = 0;
13507 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
13509 if (pushable_regs & (1 << regno))
13511 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13513 high_regs_pushed --;
13514 real_regs_mask |= (1 << next_hi_reg);
13516 if (high_regs_pushed)
13518 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
13519 next_hi_reg --)
13520 if (live_regs_mask & (1 << next_hi_reg))
13521 break;
13523 else
13525 pushable_regs &= ~((1 << regno) - 1);
13526 break;
13531 /* If we had to find a work register and we have not yet
13532 saved the LR then add it to the list of regs to push. */
13533 if (l_mask == (1 << LR_REGNUM))
13535 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
13536 1, &cfa_offset,
13537 real_regs_mask | (1 << LR_REGNUM));
13538 l_mask = 0;
13540 else
13541 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13546 /* Handle the case of a double word load into a low register from
13547 a computed memory address. The computed address may involve a
13548 register which is overwritten by the load. */
13549 const char *
13550 thumb_load_double_from_address (rtx *operands)
13552 rtx addr;
13553 rtx base;
13554 rtx offset;
13555 rtx arg1;
13556 rtx arg2;
13558 gcc_assert (GET_CODE (operands[0]) == REG);
13559 gcc_assert (GET_CODE (operands[1]) == MEM);
13561 /* Get the memory address. */
13562 addr = XEXP (operands[1], 0);
13564 /* Work out how the memory address is computed. */
13565 switch (GET_CODE (addr))
13567 case REG:
13568 operands[2] = gen_rtx_MEM (SImode,
13569 plus_constant (XEXP (operands[1], 0), 4));
13571 if (REGNO (operands[0]) == REGNO (addr))
13573 output_asm_insn ("ldr\t%H0, %2", operands);
13574 output_asm_insn ("ldr\t%0, %1", operands);
13576 else
13578 output_asm_insn ("ldr\t%0, %1", operands);
13579 output_asm_insn ("ldr\t%H0, %2", operands);
13581 break;
13583 case CONST:
13584 /* Compute <address> + 4 for the high order load. */
13585 operands[2] = gen_rtx_MEM (SImode,
13586 plus_constant (XEXP (operands[1], 0), 4));
13588 output_asm_insn ("ldr\t%0, %1", operands);
13589 output_asm_insn ("ldr\t%H0, %2", operands);
13590 break;
13592 case PLUS:
13593 arg1 = XEXP (addr, 0);
13594 arg2 = XEXP (addr, 1);
13596 if (CONSTANT_P (arg1))
13597 base = arg2, offset = arg1;
13598 else
13599 base = arg1, offset = arg2;
13601 gcc_assert (GET_CODE (base) == REG);
13603 /* Catch the case of <address> = <reg> + <reg>.  */
13604 if (GET_CODE (offset) == REG)
13606 int reg_offset = REGNO (offset);
13607 int reg_base = REGNO (base);
13608 int reg_dest = REGNO (operands[0]);
13610 /* Add the base and offset registers together into the
13611 higher destination register. */
13612 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13613 reg_dest + 1, reg_base, reg_offset);
13615 /* Load the lower destination register from the address in
13616 the higher destination register. */
13617 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13618 reg_dest, reg_dest + 1);
13620 /* Load the higher destination register from its own address
13621 plus 4. */
13622 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13623 reg_dest + 1, reg_dest + 1);
13625 else
13627 /* Compute <address> + 4 for the high order load. */
13628 operands[2] = gen_rtx_MEM (SImode,
13629 plus_constant (XEXP (operands[1], 0), 4));
13631 /* If the computed address is held in the low order register
13632 then load the high order register first, otherwise always
13633 load the low order register first. */
13634 if (REGNO (operands[0]) == REGNO (base))
13636 output_asm_insn ("ldr\t%H0, %2", operands);
13637 output_asm_insn ("ldr\t%0, %1", operands);
13639 else
13641 output_asm_insn ("ldr\t%0, %1", operands);
13642 output_asm_insn ("ldr\t%H0, %2", operands);
13645 break;
13647 case LABEL_REF:
13648 /* With no registers to worry about we can just load the value
13649 directly. */
13650 operands[2] = gen_rtx_MEM (SImode,
13651 plus_constant (XEXP (operands[1], 0), 4));
13653 output_asm_insn ("ldr\t%H0, %2", operands);
13654 output_asm_insn ("ldr\t%0, %1", operands);
13655 break;
13657 default:
13658 gcc_unreachable ();
13661 return "";
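/* Output a Thumb copy of N words of memory: sort the scratch register
   operands into ascending order, then emit a single ldmia/stmia pair
   with base register writeback.  */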
13664 const char *
13665 thumb_output_move_mem_multiple (int n, rtx *operands)
13667 rtx tmp;
13669 switch (n)
13671 case 2:
13672 if (REGNO (operands[4]) > REGNO (operands[5]))
13674 tmp = operands[4];
13675 operands[4] = operands[5];
13676 operands[5] = tmp;
13678 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13679 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13680 break;
13682 case 3:
13683 if (REGNO (operands[4]) > REGNO (operands[5]))
13685 tmp = operands[4];
13686 operands[4] = operands[5];
13687 operands[5] = tmp;
13689 if (REGNO (operands[5]) > REGNO (operands[6]))
13691 tmp = operands[5];
13692 operands[5] = operands[6];
13693 operands[6] = tmp;
13695 if (REGNO (operands[4]) > REGNO (operands[5]))
13697 tmp = operands[4];
13698 operands[4] = operands[5];
13699 operands[5] = tmp;
13702 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13703 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13704 break;
13706 default:
13707 gcc_unreachable ();
13710 return "";
13713 /* Output a call-via instruction for thumb state. */
13714 const char *
13715 thumb_call_via_reg (rtx reg)
13717 int regno = REGNO (reg);
13718 rtx *labelp;
13720 gcc_assert (regno < LR_REGNUM);
13722 /* If we are in the normal text section we can use a single instance
13723 per compilation unit. If we are doing function sections, then we need
13724 an entry per section, since we can't rely on reachability. */
13725 if (in_text_section ())
13727 thumb_call_reg_needed = 1;
13729 if (thumb_call_via_label[regno] == NULL)
13730 thumb_call_via_label[regno] = gen_label_rtx ();
13731 labelp = thumb_call_via_label + regno;
13733 else
13735 if (cfun->machine->call_via[regno] == NULL)
13736 cfun->machine->call_via[regno] = gen_label_rtx ();
13737 labelp = cfun->machine->call_via + regno;
13740 output_asm_insn ("bl\t%a0", labelp);
13741 return "";
13744 /* Routines for generating rtl. */
13745 void
13746 thumb_expand_movmemqi (rtx *operands)
13748 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13749 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13750 HOST_WIDE_INT len = INTVAL (operands[2]);
13751 HOST_WIDE_INT offset = 0;
13753 while (len >= 12)
13755 emit_insn (gen_movmem12b (out, in, out, in));
13756 len -= 12;
13759 if (len >= 8)
13761 emit_insn (gen_movmem8b (out, in, out, in));
13762 len -= 8;
13765 if (len >= 4)
13767 rtx reg = gen_reg_rtx (SImode);
13768 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13769 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13770 len -= 4;
13771 offset += 4;
13774 if (len >= 2)
13776 rtx reg = gen_reg_rtx (HImode);
13777 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13778 plus_constant (in, offset))));
13779 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13780 reg));
13781 len -= 2;
13782 offset += 2;
13785 if (len)
13787 rtx reg = gen_reg_rtx (QImode);
13788 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13789 plus_constant (in, offset))));
13790 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13791 reg));
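/* Handle storing a half-word to memory during reload.  */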
13795 void
13796 thumb_reload_out_hi (rtx *operands)
13798 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13801 /* Handle reading a half-word from memory during reload. */
13802 void
13803 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13805 gcc_unreachable ();
13808 /* Return the length of a function name prefix
13809 that starts with the character C. */
13810 static int
13811 arm_get_strip_length (int c)
13813 switch (c)
13815 ARM_NAME_ENCODING_LENGTHS
13816 default: return 0;
13820 /* Return a pointer to a function's name with any
13821 and all prefix encodings stripped from it. */
13822 const char *
13823 arm_strip_name_encoding (const char *name)
13825 int skip;
13827 while ((skip = arm_get_strip_length (* name)))
13828 name += skip;
13830 return name;
13833 /* If there is a '*' anywhere in the name's prefix, then
13834 emit the stripped name verbatim, otherwise prepend an
13835 underscore if leading underscores are being used. */
13836 void
13837 arm_asm_output_labelref (FILE *stream, const char *name)
13839 int skip;
13840 int verbatim = 0;
13842 while ((skip = arm_get_strip_length (* name)))
13844 verbatim |= (*name == '*');
13845 name += skip;
13848 if (verbatim)
13849 fputs (name, stream);
13850 else
13851 asm_fprintf (stream, "%U%s", name);
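/* At the end of the file, emit a "bx <reg>" thunk for each Thumb
   call-via-register label that was used from the normal text section.  */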
13854 static void
13855 arm_file_end (void)
13857 int regno;
13859 if (! thumb_call_reg_needed)
13860 return;
13862 text_section ();
13863 asm_fprintf (asm_out_file, "\t.code 16\n");
13864 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13866 for (regno = 0; regno < LR_REGNUM; regno++)
13868 rtx label = thumb_call_via_label[regno];
13870 if (label != 0)
13872 targetm.asm_out.internal_label (asm_out_file, "L",
13873 CODE_LABEL_NUMBER (label));
13874 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
13879 rtx aof_pic_label;
13881 #ifdef AOF_ASSEMBLER
13882 /* Special functions only needed when producing AOF syntax assembler. */
13884 struct pic_chain
13886 struct pic_chain * next;
13887 const char * symname;
13890 static struct pic_chain * aof_pic_chain = NULL;
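/* Return an RTX for the location of symbol X's address constant in the
   PIC constant pool (an offset from aof_pic_label), appending a new
   entry to the chain if the symbol is not already present.  */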
13893 aof_pic_entry (rtx x)
13895 struct pic_chain ** chainp;
13896 int offset;
13898 if (aof_pic_label == NULL_RTX)
13900 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13903 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13904 offset += 4, chainp = &(*chainp)->next)
13905 if ((*chainp)->symname == XSTR (x, 0))
13906 return plus_constant (aof_pic_label, offset);
13908 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13909 (*chainp)->next = NULL;
13910 (*chainp)->symname = XSTR (x, 0);
13911 return plus_constant (aof_pic_label, offset);
13914 void
13915 aof_dump_pic_table (FILE *f)
13917 struct pic_chain * chain;
13919 if (aof_pic_chain == NULL)
13920 return;
13922 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13923 PIC_OFFSET_TABLE_REGNUM,
13924 PIC_OFFSET_TABLE_REGNUM);
13925 fputs ("|x$adcons|\n", f);
13927 for (chain = aof_pic_chain; chain; chain = chain->next)
13929 fputs ("\tDCD\t", f);
13930 assemble_name (f, chain->symname);
13931 fputs ("\n", f);
13935 int arm_text_section_count = 1;
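/* Return the AREA directive that starts the next AOF text section.
   Each section gets a unique number; PIC code is also marked REENTRANT.  */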
13937 char *
13938 aof_text_section (void)
13940 static char buf[100];
13941 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13942 arm_text_section_count++);
13943 if (flag_pic)
13944 strcat (buf, ", PIC, REENTRANT");
13945 return buf;
13948 static int arm_data_section_count = 1;
13950 char *
13951 aof_data_section (void)
13953 static char buf[100];
13954 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13955 return buf;
13958 /* The AOF assembler is religiously strict about declarations of
13959 imported and exported symbols, so that it is impossible to declare
13960 a function as imported near the beginning of the file, and then to
13961 export it later on. It is, however, possible to delay the decision
13962 until all the functions in the file have been compiled. To get
13963 around this, we maintain a list of the imports and exports, and
13964 delete from it any that are subsequently defined. At the end of
13965 compilation we spit the remainder of the list out before the END
13966 directive. */
13968 struct import
13970 struct import * next;
13971 const char * name;
13974 static struct import * imports_list = NULL;
13976 void
13977 aof_add_import (const char *name)
13979 struct import * new;
13981 for (new = imports_list; new; new = new->next)
13982 if (new->name == name)
13983 return;
13985 new = (struct import *) xmalloc (sizeof (struct import));
13986 new->next = imports_list;
13987 imports_list = new;
13988 new->name = name;
13991 void
13992 aof_delete_import (const char *name)
13994 struct import ** old;
13996 for (old = &imports_list; *old; old = & (*old)->next)
13998 if ((*old)->name == name)
14000 *old = (*old)->next;
14001 return;
14006 int arm_main_function = 0;
14008 static void
14009 aof_dump_imports (FILE *f)
14011 /* The AOF assembler needs this to cause the startup code to be extracted
14012 from the library. Bringing in __main causes the whole thing to work
14013 automagically. */
14014 if (arm_main_function)
14016 text_section ();
14017 fputs ("\tIMPORT __main\n", f);
14018 fputs ("\tDCD __main\n", f);
14021 /* Now dump the remaining imports. */
14022 while (imports_list)
14024 fprintf (f, "\tIMPORT\t");
14025 assemble_name (f, imports_list->name);
14026 fputc ('\n', f);
14027 imports_list = imports_list->next;
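/* Globalize a label, and note whether it is "main" so that the library
   startup code import can be emitted at the end of the file.  */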
14031 static void
14032 aof_globalize_label (FILE *stream, const char *name)
14034 default_globalize_label (stream, name);
14035 if (! strcmp (name, "main"))
14036 arm_main_function = 1;
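/* Emit the standard AOF register name definitions at the start of the
   output file.  */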
14039 static void
14040 aof_file_start (void)
14042 fputs ("__r0\tRN\t0\n", asm_out_file);
14043 fputs ("__a1\tRN\t0\n", asm_out_file);
14044 fputs ("__a2\tRN\t1\n", asm_out_file);
14045 fputs ("__a3\tRN\t2\n", asm_out_file);
14046 fputs ("__a4\tRN\t3\n", asm_out_file);
14047 fputs ("__v1\tRN\t4\n", asm_out_file);
14048 fputs ("__v2\tRN\t5\n", asm_out_file);
14049 fputs ("__v3\tRN\t6\n", asm_out_file);
14050 fputs ("__v4\tRN\t7\n", asm_out_file);
14051 fputs ("__v5\tRN\t8\n", asm_out_file);
14052 fputs ("__v6\tRN\t9\n", asm_out_file);
14053 fputs ("__sl\tRN\t10\n", asm_out_file);
14054 fputs ("__fp\tRN\t11\n", asm_out_file);
14055 fputs ("__ip\tRN\t12\n", asm_out_file);
14056 fputs ("__sp\tRN\t13\n", asm_out_file);
14057 fputs ("__lr\tRN\t14\n", asm_out_file);
14058 fputs ("__pc\tRN\t15\n", asm_out_file);
14059 fputs ("__f0\tFN\t0\n", asm_out_file);
14060 fputs ("__f1\tFN\t1\n", asm_out_file);
14061 fputs ("__f2\tFN\t2\n", asm_out_file);
14062 fputs ("__f3\tFN\t3\n", asm_out_file);
14063 fputs ("__f4\tFN\t4\n", asm_out_file);
14064 fputs ("__f5\tFN\t5\n", asm_out_file);
14065 fputs ("__f6\tFN\t6\n", asm_out_file);
14066 fputs ("__f7\tFN\t7\n", asm_out_file);
14067 text_section ();
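/* At the end of an AOF file, dump the PIC table, the Thumb call-via
   thunks and any remaining imports, then emit the END directive.  */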
14070 static void
14071 aof_file_end (void)
14073 if (flag_pic)
14074 aof_dump_pic_table (asm_out_file);
14075 arm_file_end ();
14076 aof_dump_imports (asm_out_file);
14077 fputs ("\tEND\n", asm_out_file);
14079 #endif /* AOF_ASSEMBLER */
14081 #ifndef ARM_PE
14082 /* Symbols in the text segment can be accessed without indirecting via the
14083 constant pool; it may take an extra binary operation, but this is still
14084 faster than indirecting via memory. Don't do this when not optimizing,
14085 since we won't be calculating all of the offsets necessary to do this
14086 simplification. */
14088 static void
14089 arm_encode_section_info (tree decl, rtx rtl, int first)
14091 /* This doesn't work with AOF syntax, since the string table may be in
14092 a different AREA. */
14093 #ifndef AOF_ASSEMBLER
14094 if (optimize > 0 && TREE_CONSTANT (decl))
14095 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14096 #endif
14098 /* If we are referencing a function that is weak then encode a long call
14099 flag in the function name; otherwise, if the function is static or
14100 known to be defined in this file, encode a short call flag. */
14101 if (first && DECL_P (decl))
14103 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14104 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14105 else if (! TREE_PUBLIC (decl))
14106 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14109 #endif /* !ARM_PE */
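/* Output an internal label.  If it is the label that the conditional
   execution state machine is waiting for, reset that state first.  */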
14111 static void
14112 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14114 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14115 && !strcmp (prefix, "L"))
14117 arm_ccfsm_state = 0;
14118 arm_target_insn = NULL;
14120 default_internal_label (stream, prefix, labelno);
14123 /* Output code to add DELTA to the first argument, and then jump
14124 to FUNCTION. Used for C++ multiple inheritance. */
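/* For Thumb, the branch to FUNCTION is done indirectly: the target
   address is loaded into r12 from a literal word emitted after the
   thunk and the jump is made with "bx r12".  */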
14125 static void
14126 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14127 HOST_WIDE_INT delta,
14128 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14129 tree function)
14131 static int thunk_label = 0;
14132 char label[256];
14133 int mi_delta = delta;
14134 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14135 int shift = 0;
14136 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14137 ? 1 : 0);
14138 if (mi_delta < 0)
14139 mi_delta = - mi_delta;
14140 if (TARGET_THUMB)
14142 int labelno = thunk_label++;
14143 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14144 fputs ("\tldr\tr12, ", file);
14145 assemble_name (file, label);
14146 fputc ('\n', file);
14148 while (mi_delta != 0)
14150 if ((mi_delta & (3 << shift)) == 0)
14151 shift += 2;
14152 else
14154 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14155 mi_op, this_regno, this_regno,
14156 mi_delta & (0xff << shift));
14157 mi_delta &= ~(0xff << shift);
14158 shift += 8;
14161 if (TARGET_THUMB)
14163 fprintf (file, "\tbx\tr12\n");
14164 ASM_OUTPUT_ALIGN (file, 2);
14165 assemble_name (file, label);
14166 fputs (":\n", file);
14167 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14169 else
14171 fputs ("\tb\t", file);
14172 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14173 if (NEED_PLT_RELOC)
14174 fputs ("(PLT)", file);
14175 fputc ('\n', file);
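/* Output the CONST_VECTOR X to FILE as a single hexadecimal constant,
   printing the elements from the highest-numbered downwards.
   Returns 1.  */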
14180 arm_emit_vector_const (FILE *file, rtx x)
14182 int i;
14183 const char * pattern;
14185 gcc_assert (GET_CODE (x) == CONST_VECTOR);
14187 switch (GET_MODE (x))
14189 case V2SImode: pattern = "%08x"; break;
14190 case V4HImode: pattern = "%04x"; break;
14191 case V8QImode: pattern = "%02x"; break;
14192 default: gcc_unreachable ();
14195 fprintf (file, "0x");
14196 for (i = CONST_VECTOR_NUNITS (x); i--;)
14198 rtx element;
14200 element = CONST_VECTOR_ELT (x, i);
14201 fprintf (file, pattern, INTVAL (element));
14204 return 1;
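/* Output the assembly for a load of iWMMXt GR register operand 0 from
   memory operand 1.  A single wldrw is used when the address offset is
   in range; otherwise the base address register is spilled to the stack,
   reused to hold the loaded value, transferred with tmcr and then
   restored.  */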
14207 const char *
14208 arm_output_load_gr (rtx *operands)
14210 rtx reg;
14211 rtx offset;
14212 rtx wcgr;
14213 rtx sum;
14215 if (GET_CODE (operands [1]) != MEM
14216 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14217 || GET_CODE (reg = XEXP (sum, 0)) != REG
14218 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14219 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14220 return "wldrw%?\t%0, %1";
14222 /* Fix up an out-of-range load of a GR register. */
14223 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14224 wcgr = operands[0];
14225 operands[0] = reg;
14226 output_asm_insn ("ldr%?\t%0, %1", operands);
14228 operands[0] = wcgr;
14229 operands[1] = reg;
14230 output_asm_insn ("tmcr%?\t%0, %1", operands);
14231 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14233 return "";
14236 static rtx
14237 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14238 int incoming ATTRIBUTE_UNUSED)
14240 #if 0
14241 /* FIXME: The ARM backend has special code to handle structure
14242 returns, and will reserve its own hidden first argument. So
14243 if this macro is enabled a *second* hidden argument will be
14244 reserved, which will break binary compatibility with old
14245 toolchains and also thunk handling. One day this should be
14246 fixed. */
14247 return 0;
14248 #else
14249 /* Register in which address to store a structure value
14250 is passed to a function. */
14251 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14252 #endif
14255 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14257 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14258 named arg and all anonymous args onto the stack.
14259 XXX I know the prologue shouldn't be pushing registers, but it is faster
14260 that way. */
14262 static void
14263 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14264 enum machine_mode mode ATTRIBUTE_UNUSED,
14265 tree type ATTRIBUTE_UNUSED,
14266 int *pretend_size,
14267 int second_time ATTRIBUTE_UNUSED)
14269 cfun->machine->uses_anonymous_args = 1;
14270 if (cum->nregs < NUM_ARG_REGS)
14271 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
14274 /* Return nonzero if the CONSUMER instruction (a store) does not need
14275 PRODUCER's value to calculate the address. */
14278 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14280 rtx value = PATTERN (producer);
14281 rtx addr = PATTERN (consumer);
14283 if (GET_CODE (value) == COND_EXEC)
14284 value = COND_EXEC_CODE (value);
14285 if (GET_CODE (value) == PARALLEL)
14286 value = XVECEXP (value, 0, 0);
14287 value = XEXP (value, 0);
14288 if (GET_CODE (addr) == COND_EXEC)
14289 addr = COND_EXEC_CODE (addr);
14290 if (GET_CODE (addr) == PARALLEL)
14291 addr = XVECEXP (addr, 0, 0);
14292 addr = XEXP (addr, 0);
14294 return !reg_overlap_mentioned_p (value, addr);
14297 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14298 have an early register shift value or amount dependency on the
14299 result of PRODUCER. */
14302 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14304 rtx value = PATTERN (producer);
14305 rtx op = PATTERN (consumer);
14306 rtx early_op;
14308 if (GET_CODE (value) == COND_EXEC)
14309 value = COND_EXEC_CODE (value);
14310 if (GET_CODE (value) == PARALLEL)
14311 value = XVECEXP (value, 0, 0);
14312 value = XEXP (value, 0);
14313 if (GET_CODE (op) == COND_EXEC)
14314 op = COND_EXEC_CODE (op);
14315 if (GET_CODE (op) == PARALLEL)
14316 op = XVECEXP (op, 0, 0);
14317 op = XEXP (op, 1);
14319 early_op = XEXP (op, 0);
14320 /* This is either an actual independent shift, or a shift applied to
14321 the first operand of another operation. We want the whole shift
14322 operation. */
14323 if (GET_CODE (early_op) == REG)
14324 early_op = op;
14326 return !reg_overlap_mentioned_p (value, early_op);
14329 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14330 have an early register shift value dependency on the result of
14331 PRODUCER. */
14334 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14336 rtx value = PATTERN (producer);
14337 rtx op = PATTERN (consumer);
14338 rtx early_op;
14340 if (GET_CODE (value) == COND_EXEC)
14341 value = COND_EXEC_CODE (value);
14342 if (GET_CODE (value) == PARALLEL)
14343 value = XVECEXP (value, 0, 0);
14344 value = XEXP (value, 0);
14345 if (GET_CODE (op) == COND_EXEC)
14346 op = COND_EXEC_CODE (op);
14347 if (GET_CODE (op) == PARALLEL)
14348 op = XVECEXP (op, 0, 0);
14349 op = XEXP (op, 1);
14351 early_op = XEXP (op, 0);
14353 /* This is either an actual independent shift, or a shift applied to
14354 the first operand of another operation. We want the value being
14355 shifted, in either case. */
14356 if (GET_CODE (early_op) != REG)
14357 early_op = XEXP (early_op, 0);
14359 return !reg_overlap_mentioned_p (value, early_op);
14362 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14363 have an early register mult dependency on the result of
14364 PRODUCER. */
14367 arm_no_early_mul_dep (rtx producer, rtx consumer)
14369 rtx value = PATTERN (producer);
14370 rtx op = PATTERN (consumer);
14372 if (GET_CODE (value) == COND_EXEC)
14373 value = COND_EXEC_CODE (value);
14374 if (GET_CODE (value) == PARALLEL)
14375 value = XVECEXP (value, 0, 0);
14376 value = XEXP (value, 0);
14377 if (GET_CODE (op) == COND_EXEC)
14378 op = COND_EXEC_CODE (op);
14379 if (GET_CODE (op) == PARALLEL)
14380 op = XVECEXP (op, 0, 0);
14381 op = XEXP (op, 1);
14383 return (GET_CODE (op) == PLUS
14384 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14388 /* We can't rely on the caller doing the proper promotion when
14389 using APCS or ATPCS. */
14391 static bool
14392 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14394 return !TARGET_AAPCS_BASED;
14398 /* AAPCS based ABIs use short enums by default. */
14400 static bool
14401 arm_default_short_enums (void)
14403 return TARGET_AAPCS_BASED;
14407 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14409 static bool
14410 arm_align_anon_bitfield (void)
14412 return TARGET_AAPCS_BASED;
14416 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14418 static tree
14419 arm_cxx_guard_type (void)
14421 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14425 /* The EABI says to test the least significant bit of a guard variable. */
14427 static bool
14428 arm_cxx_guard_mask_bit (void)
14430 return TARGET_AAPCS_BASED;
14434 /* The EABI specifies that all array cookies are 8 bytes long. */
14436 static tree
14437 arm_get_cookie_size (tree type)
14439 tree size;
14441 if (!TARGET_AAPCS_BASED)
14442 return default_cxx_get_cookie_size (type);
14444 size = build_int_cst (sizetype, 8);
14445 return size;
14449 /* The EABI says that array cookies should also contain the element size. */
14451 static bool
14452 arm_cookie_has_size (void)
14454 return TARGET_AAPCS_BASED;
14458 /* The EABI says constructors and destructors should return a pointer to
14459 the object constructed/destroyed. */
14461 static bool
14462 arm_cxx_cdtor_returns_this (void)
14464 return TARGET_AAPCS_BASED;
14467 /* The EABI says that an inline function may never be the key
14468 method. */
14470 static bool
14471 arm_cxx_key_method_may_be_inline (void)
14473 return !TARGET_AAPCS_BASED;
14476 static void
14477 arm_cxx_determine_class_data_visibility (tree decl)
14479 if (!TARGET_AAPCS_BASED)
14480 return;
14482 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
14483 is exported. However, on systems without dynamic vague linkage,
14484 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
14485 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
14486 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
14487 else
14488 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
14489 DECL_VISIBILITY_SPECIFIED (decl) = 1;
14492 static bool
14493 arm_cxx_class_data_always_comdat (void)
14495 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
14496 vague linkage if the class has no key function. */
14497 return !TARGET_AAPCS_BASED;
14501 /* The EABI says __aeabi_atexit should be used to register static
14502 destructors. */
14504 static bool
14505 arm_cxx_use_aeabi_atexit (void)
14507 return TARGET_AAPCS_BASED;
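/* Set the current function's return address to SOURCE.  If the link
   register was saved on the stack, store SOURCE into its save slot,
   using SCRATCH for any out-of-range address arithmetic; otherwise just
   copy SOURCE into LR.  */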
14511 void
14512 arm_set_return_address (rtx source, rtx scratch)
14514 arm_stack_offsets *offsets;
14515 HOST_WIDE_INT delta;
14516 rtx addr;
14517 unsigned long saved_regs;
14519 saved_regs = arm_compute_save_reg_mask ();
14521 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14522 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14523 else
14525 if (frame_pointer_needed)
14526 addr = plus_constant(hard_frame_pointer_rtx, -4);
14527 else
14529 /* LR will be the first saved register. */
14530 offsets = arm_get_frame_offsets ();
14531 delta = offsets->outgoing_args - (offsets->frame + 4);
14534 if (delta >= 4096)
14536 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14537 GEN_INT (delta & ~4095)));
14538 addr = scratch;
14539 delta &= 4095;
14541 else
14542 addr = stack_pointer_rtx;
14544 addr = plus_constant (addr, delta);
14546 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
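/* As above, but for Thumb: the saved LR slot is located relative to the
   frame pointer or the stack pointer, allowing for the backtrace
   structure when present.  */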
14551 void
14552 thumb_set_return_address (rtx source, rtx scratch)
14554 arm_stack_offsets *offsets;
14555 HOST_WIDE_INT delta;
14556 int reg;
14557 rtx addr;
14558 unsigned long mask;
14560 emit_insn (gen_rtx_USE (VOIDmode, source));
14562 mask = thumb_compute_save_reg_mask ();
14563 if (mask & (1 << LR_REGNUM))
14565 offsets = arm_get_frame_offsets ();
14567 /* Find the saved regs. */
14568 if (frame_pointer_needed)
14570 delta = offsets->soft_frame - offsets->saved_args;
14571 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14573 else
14575 delta = offsets->outgoing_args - offsets->saved_args;
14576 reg = SP_REGNUM;
14578 /* Allow for the stack frame. */
14579 if (TARGET_BACKTRACE)
14580 delta -= 16;
14581 /* The link register is always the first saved register. */
14582 delta -= 4;
14584 /* Construct the address. */
14585 addr = gen_rtx_REG (SImode, reg);
14586 if ((reg != SP_REGNUM && delta >= 128)
14587 || delta >= 1024)
14589 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14590 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14591 addr = scratch;
14593 else
14594 addr = plus_constant (addr, delta);
14596 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14598 else
14599 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14602 /* Implements target hook vector_mode_supported_p. */
14603 bool
14604 arm_vector_mode_supported_p (enum machine_mode mode)
14606 if ((mode == V2SImode)
14607 || (mode == V4HImode)
14608 || (mode == V8QImode))
14609 return true;
14611 return false;
14614 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14615 ARM insns and therefore guarantee that the shift count is modulo 256.
14616 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14617 guarantee no particular behavior for out-of-range counts. */
14619 static unsigned HOST_WIDE_INT
14620 arm_shift_truncation_mask (enum machine_mode mode)
14622 return mode == SImode ? 255 : 0;
14626 /* Map internal gcc register numbers to DWARF2 register numbers. */
14628 unsigned int
14629 arm_dbx_register_number (unsigned int regno)
14631 if (regno < 16)
14632 return regno;
14634 /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
14635 compatibility. The EABI defines them as registers 96-103. */
14636 if (IS_FPA_REGNUM (regno))
14637 return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;
14639 if (IS_VFP_REGNUM (regno))
14640 return 64 + regno - FIRST_VFP_REGNUM;
14642 if (IS_IWMMXT_GR_REGNUM (regno))
14643 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
14645 if (IS_IWMMXT_REGNUM (regno))
14646 return 112 + regno - FIRST_IWMMXT_REGNUM;
14648 gcc_unreachable ();