/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static void arm_init_libfuncs (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the legacy -mhard-float and -msoft-float options.  */
const char * target_float_switch = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */
#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
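/* For example, FL_FOR_ARCH5TE expands through the chain above to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB:
   each architecture level inherits the capabilities of its
   predecessors and adds its own bit.  */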
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
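/* Note that the table is laid out so that a condition code and its
   inverse are adjacent: EQ/NE, CS/CC, MI/PL and so on.  Inverting a
   condition is therefore just a matter of toggling the low bit of its
   index; "eq" at index 0, for instance, inverts to "ne" at index 1.  */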
#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
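/* Illustration: VALUE & (VALUE - 1) clears exactly the least
   significant set bit, so the loop above iterates once per set bit.
   For 0x2c (binary 101100) it sees 101100 -> 101000 -> 100000 -> 0
   and returns 3.  */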
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
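/* As an illustrative consequence of the mappings above, on a BPABI
   target a plain C division such as

       int quot (int x, int y) { return x / y; }

   becomes a call to __aeabi_idivmod, which returns the quotient in r0
   and the remainder in r1; the caller simply ignores r1 when only the
   quotient is wanted.  */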
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
925 other command line choices. */
926 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
928 warning (0, "target CPU does not support interworking" );
929 target_flags &= ~ARM_FLAG_INTERWORK;
932 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
934 warning (0, "target CPU does not support THUMB instructions");
935 target_flags &= ~ARM_FLAG_THUMB;
938 if (TARGET_APCS_FRAME && TARGET_THUMB)
940 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
941 target_flags &= ~ARM_FLAG_APCS_FRAME;
944 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
945 from here where no function is being compiled currently. */
946 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
947 && TARGET_ARM)
948 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
950 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
951 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
953 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
954 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
956 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
958 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
959 target_flags |= ARM_FLAG_APCS_FRAME;
962 if (TARGET_POKE_FUNCTION_NAME)
963 target_flags |= ARM_FLAG_APCS_FRAME;
965 if (TARGET_APCS_REENT && flag_pic)
966 error ("-fpic and -mapcs-reent are incompatible");
968 if (TARGET_APCS_REENT)
969 warning (0, "APCS reentrant code not supported. Ignored");
971 /* If this target is normally configured to use APCS frames, warn if they
972 are turned off and debugging is turned on. */
973 if (TARGET_ARM
974 && write_symbols != NO_DEBUG
975 && !TARGET_APCS_FRAME
976 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
977 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
979 /* If stack checking is disabled, we can use r10 as the PIC register,
980 which keeps r9 available. */
981 if (flag_pic)
982 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
984 if (TARGET_APCS_FLOAT)
985 warning (0, "passing floating point arguments in fp regs not yet supported");
987 /* Initialize boolean versions of the flags, for use in the arm.md file. */
988 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
989 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
990 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
991 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
992 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
993 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
994 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
995 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
997 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
998 arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
999 thumb_code = (TARGET_ARM == 0);
1000 arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
1001 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
1002 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
         if ((insn_flags & FL_VFP) != 0)
           arm_fpu_arch = FPUTYPE_VFP;
         else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else if (target_float_switch)
    {
      /* This is a bit of a hack to avoid needing target flags for these.  */
      if (target_float_switch[0] == 'h')
        arm_float_abi = ARM_FLOAT_ABI_HARD;
      else
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
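/* As an example of how this is reached: a handler declared as

       void handler (void) __attribute__ ((interrupt ("IRQ")));

   arrives here with ARGUMENT being the string constant "IRQ", and is
   therefore classified as ARM_FT_ISR; an unrecognized string yields
   ARM_FT_UNKNOWN.  */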
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ... */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ... */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
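/* When this function returns 1, the whole epilogue can be a single
   instruction: typically "mov pc, lr" (or "bx lr" when interworking)
   if nothing was pushed, or one load-multiple that restores the saved
   registers and loads the PC in the same instruction.  */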
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
          (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                         >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
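/* The loop above implements the architectural rule that an ARM
   data-processing immediate is an 8-bit value rotated right by an
   even amount.  Thus 0xff, 0xff000000 and 0xf000000f (0xff rotated
   right by 4) are all valid immediates, while 257 (0x101) spans nine
   bits and is not; such values must be synthesized in several
   instructions or loaded from a constant pool.  */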
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
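/* For example, PLUS with -1 fails const_ok_for_arm (0xffffffff is not
   an encodable immediate), but its negation 1 is encodable, so the
   addition can be emitted as a subtract of 1.  Likewise AND with
   0xffffff00 can be emitted as a BIC of the inverted constant 0xff.  */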
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
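/* A worked example: for (set (reg) (const_int 0xfff)), the value 0xfff
   is not a valid immediate, but it splits into two that are, giving

       mov     rD, #0xf00
       orr     rD, rD, #0xff

   (rD standing in for the destination register), which is within the
   default arm_constant_limit.  A SET whose synthesis would cost more
   than the limit is instead emitted as a plain SET and later pushed
   into a minipool by arm_reorg, to be loaded with a single ldr.  */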
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
1794 case SET:
1795 /* See if we can do this by sign_extending a constant that is known
1796 to be negative. This is a good way of doing it, since the shift
1797 may well merge into a subsequent insn. */
1798 if (set_sign_bit_copies > 1)
1800 if (const_ok_for_arm
1801 (temp1 = ARM_SIGN_EXTEND (remainder
1802 << (set_sign_bit_copies - 1))))
1804 if (generate)
1806 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1807 emit_constant_insn (cond,
1808 gen_rtx_SET (VOIDmode, new_src,
1809 GEN_INT (temp1)));
1810 emit_constant_insn (cond,
1811 gen_ashrsi3 (target, new_src,
1812 GEN_INT (set_sign_bit_copies - 1)));
1814 return 2;
1816 /* For an inverted constant, we will need to set the low bits,
1817 these will be shifted out of harm's way. */
1818 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1819 if (const_ok_for_arm (~temp1))
1821 if (generate)
1823 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1824 emit_constant_insn (cond,
1825 gen_rtx_SET (VOIDmode, new_src,
1826 GEN_INT (temp1)));
1827 emit_constant_insn (cond,
1828 gen_ashrsi3 (target, new_src,
1829 GEN_INT (set_sign_bit_copies - 1)));
1831 return 2;
1835 /* See if we can generate this by setting the bottom (or the top)
1836 16 bits, and then shifting these into the other half of the
1837 word. We only look for the simplest cases, to do more would cost
1838 too much. Be careful, however, not to generate this when the
1839 alternative would take fewer insns. */
1840 if (val & 0xffff0000)
1842 temp1 = remainder & 0xffff0000;
1843 temp2 = remainder & 0x0000ffff;
1845 /* Overlaps outside this range are best done using other methods. */
1846 for (i = 9; i < 24; i++)
1848 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1849 && !const_ok_for_arm (temp2))
1851 rtx new_src = (subtargets
1852 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1853 : target);
1854 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1855 source, subtargets, generate);
1856 source = new_src;
1857 if (generate)
1858 emit_constant_insn
1859 (cond,
1860 gen_rtx_SET
1861 (VOIDmode, target,
1862 gen_rtx_IOR (mode,
1863 gen_rtx_ASHIFT (mode, source,
1864 GEN_INT (i)),
1865 source)));
1866 return insns + 1;
1870 /* Don't duplicate cases already considered. */
1871 for (i = 17; i < 24; i++)
1873 if (((temp1 | (temp1 >> i)) == remainder)
1874 && !const_ok_for_arm (temp1))
1876 rtx new_src = (subtargets
1877 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1878 : target);
1879 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1880 source, subtargets, generate);
1881 source = new_src;
1882 if (generate)
1883 emit_constant_insn
1884 (cond,
1885 gen_rtx_SET (VOIDmode, target,
1886 gen_rtx_IOR
1887 (mode,
1888 gen_rtx_LSHIFTRT (mode, source,
1889 GEN_INT (i)),
1890 source)));
1891 return insns + 1;
1895 break;
1897 case IOR:
1898 case XOR:
1899 /* If we have IOR or XOR, and the constant can be loaded in a
1900 single instruction, and we can find a temporary to put it in,
1901 then this can be done in two instructions instead of 3-4. */
1902 if (subtargets
1903 /* TARGET can't be NULL if SUBTARGETS is 0. */
1904 || (reload_completed && !reg_mentioned_p (target, source)))
1906 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1908 if (generate)
1910 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1912 emit_constant_insn (cond,
1913 gen_rtx_SET (VOIDmode, sub,
1914 GEN_INT (val)));
1915 emit_constant_insn (cond,
1916 gen_rtx_SET (VOIDmode, target,
1917 gen_rtx_fmt_ee (code, mode,
1918 source, sub)));
1920 return 2;
1924 if (code == XOR)
1925 break;
1927 if (set_sign_bit_copies > 8
1928 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1930 if (generate)
1932 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1933 rtx shift = GEN_INT (set_sign_bit_copies);
1935 emit_constant_insn
1936 (cond,
1937 gen_rtx_SET (VOIDmode, sub,
1938 gen_rtx_NOT (mode,
1939 gen_rtx_ASHIFT (mode,
1940 source,
1941 shift))));
1942 emit_constant_insn
1943 (cond,
1944 gen_rtx_SET (VOIDmode, target,
1945 gen_rtx_NOT (mode,
1946 gen_rtx_LSHIFTRT (mode, sub,
1947 shift))));
1949 return 2;
1952 if (set_zero_bit_copies > 8
1953 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1955 if (generate)
1957 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1958 rtx shift = GEN_INT (set_zero_bit_copies);
1960 emit_constant_insn
1961 (cond,
1962 gen_rtx_SET (VOIDmode, sub,
1963 gen_rtx_NOT (mode,
1964 gen_rtx_LSHIFTRT (mode,
1965 source,
1966 shift))));
1967 emit_constant_insn
1968 (cond,
1969 gen_rtx_SET (VOIDmode, target,
1970 gen_rtx_NOT (mode,
1971 gen_rtx_ASHIFT (mode, sub,
1972 shift))));
1974 return 2;
1977 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1979 if (generate)
1981 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1982 emit_constant_insn (cond,
1983 gen_rtx_SET (VOIDmode, sub,
1984 gen_rtx_NOT (mode, source)));
1985 source = sub;
1986 if (subtargets)
1987 sub = gen_reg_rtx (mode);
1988 emit_constant_insn (cond,
1989 gen_rtx_SET (VOIDmode, sub,
1990 gen_rtx_AND (mode, source,
1991 GEN_INT (temp1))));
1992 emit_constant_insn (cond,
1993 gen_rtx_SET (VOIDmode, target,
1994 gen_rtx_NOT (mode, sub)));
1996 return 3;
1998 break;
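/* The three-insn fallback above is De Morgan's law in action:
   x | C == ~(~x & ~C), which wins when ~C is encodable as an ARM
   immediate even though C itself is not.  Roughly:

       mvn     rT, rSrc        @ rT = ~x
       and     rT, rT, #~C     @ rT = ~x & ~C
       mvn     rDst, rT        @ rDst = ~(~x & ~C) = x | C  */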
2000 case AND:
2001 /* See if two shifts will do 2 or more insns' worth of work. */
2002 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2004 HOST_WIDE_INT shift_mask = ((0xffffffff
2005 << (32 - clear_sign_bit_copies))
2006 & 0xffffffff);
2008 if ((remainder | shift_mask) != 0xffffffff)
2010 if (generate)
2012 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2013 insns = arm_gen_constant (AND, mode, cond,
2014 remainder | shift_mask,
2015 new_src, source, subtargets, 1);
2016 source = new_src;
2018 else
2020 rtx targ = subtargets ? NULL_RTX : target;
2021 insns = arm_gen_constant (AND, mode, cond,
2022 remainder | shift_mask,
2023 targ, source, subtargets, 0);
2027 if (generate)
2029 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2030 rtx shift = GEN_INT (clear_sign_bit_copies);
2032 emit_insn (gen_ashlsi3 (new_src, source, shift));
2033 emit_insn (gen_lshrsi3 (target, new_src, shift));
2036 return insns + 2;
2039 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2041 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2043 if ((remainder | shift_mask) != 0xffffffff)
2045 if (generate)
2047 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2049 insns = arm_gen_constant (AND, mode, cond,
2050 remainder | shift_mask,
2051 new_src, source, subtargets, 1);
2052 source = new_src;
2054 else
2056 rtx targ = subtargets ? NULL_RTX : target;
2058 insns = arm_gen_constant (AND, mode, cond,
2059 remainder | shift_mask,
2060 targ, source, subtargets, 0);
2064 if (generate)
2066 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2067 rtx shift = GEN_INT (clear_zero_bit_copies);
2069 emit_insn (gen_lshrsi3 (new_src, source, shift));
2070 emit_insn (gen_ashlsi3 (target, new_src, shift));
2073 return insns + 2;
2076 break;
2078 default:
2079 break;
2082 for (i = 0; i < 32; i++)
2083 if (remainder & (1 << i))
2084 num_bits_set++;
2086 if (code == AND || (can_invert && num_bits_set > 16))
2087 remainder = (~remainder) & 0xffffffff;
2088 else if (code == PLUS && num_bits_set > 16)
2089 remainder = (-remainder) & 0xffffffff;
2090 else
2092 can_invert = 0;
2093 can_negate = 0;
2096 /* Now try and find a way of doing the job in either two or three
2097 instructions.
2098 We start by looking for the largest block of zeros that is aligned on
2099 a 2-bit boundary; we then fill up the temps, wrapping around to the
2100 top of the word when we drop off the bottom.
2101 In the worst case this code should produce no more than four insns. */
2103 int best_start = 0;
2104 int best_consecutive_zeros = 0;
2106 for (i = 0; i < 32; i += 2)
2108 int consecutive_zeros = 0;
2110 if (!(remainder & (3 << i)))
2112 while ((i < 32) && !(remainder & (3 << i)))
2114 consecutive_zeros += 2;
2115 i += 2;
2117 if (consecutive_zeros > best_consecutive_zeros)
2119 best_consecutive_zeros = consecutive_zeros;
2120 best_start = i - consecutive_zeros;
2122 i -= 2;
2126 /* So long as it won't require any more insns to do so, it's
2127 desirable to emit a small constant (in bits 0...9) in the last
2128 insn. This way there is more chance that it can be combined with
2129 a later addressing insn to form a pre-indexed load or store
2130 operation. Consider:
2132 *((volatile int *)0xe0000100) = 1;
2133 *((volatile int *)0xe0000110) = 2;
2135 We want this to wind up as:
2137 mov rA, #0xe0000000
2138 mov rB, #1
2139 str rB, [rA, #0x100]
2140 mov rB, #2
2141 str rB, [rA, #0x110]
2143 rather than having to synthesize both large constants from scratch.
2145 Therefore, we calculate how many insns would be required to emit
2146 the constant starting from `best_start', and also starting from
2147 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2148 yield a shorter sequence, we may as well use zero. */
2149 if (best_start != 0
2150 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2151 && (count_insns_for_constant (remainder, 0) <=
2152 count_insns_for_constant (remainder, best_start)))
2153 best_start = 0;
2155 /* Now start emitting the insns. */
2156 i = best_start;
2159 int end;
2161 if (i <= 0)
2162 i += 32;
2163 if (remainder & (3 << (i - 2)))
2165 end = i - 8;
2166 if (end < 0)
2167 end += 32;
2168 temp1 = remainder & ((0x0ff << end)
2169 | ((i < end) ? (0xff >> (32 - end)) : 0));
2170 remainder &= ~temp1;
2172 if (generate)
2174 rtx new_src, temp1_rtx;
2176 if (code == SET || code == MINUS)
2178 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2179 if (can_invert && code != MINUS)
2180 temp1 = ~temp1;
2182 else
2184 if (remainder && subtargets)
2185 new_src = gen_reg_rtx (mode);
2186 else
2187 new_src = target;
2188 if (can_invert)
2189 temp1 = ~temp1;
2190 else if (can_negate)
2191 temp1 = -temp1;
2194 temp1 = trunc_int_for_mode (temp1, mode);
2195 temp1_rtx = GEN_INT (temp1);
2197 if (code == SET)
2199 else if (code == MINUS)
2200 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2201 else
2202 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2204 emit_constant_insn (cond,
2205 gen_rtx_SET (VOIDmode, new_src,
2206 temp1_rtx));
2207 source = new_src;
2210 if (code == SET)
2212 can_invert = 0;
2213 code = PLUS;
2215 else if (code == MINUS)
2216 code = PLUS;
2218 insns++;
2219 i -= 6;
2221 i -= 2;
2223 while (remainder);
2226 return insns;
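/* A worked example of the emission loop (illustrative): for
   (set r0 0xf0f0) the constant splits into the chunks 0xf000 and
   0x00f0, and since CODE degrades from SET to PLUS after the first
   insn the sequence becomes roughly

       mov     r0, #0xf000
       add     r0, r0, #0xf0  */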
2229 /* Canonicalize a comparison so that we are more likely to recognize it.
2230 This can be done for a few constant compares, where we can make the
2231 immediate value easier to load. */
2233 enum rtx_code
2234 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2236 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2238 switch (code)
2240 case EQ:
2241 case NE:
2242 return code;
2244 case GT:
2245 case LE:
2246 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2247 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2249 *op1 = GEN_INT (i + 1);
2250 return code == GT ? GE : LT;
2252 break;
2254 case GE:
2255 case LT:
2256 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2257 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2259 *op1 = GEN_INT (i - 1);
2260 return code == GE ? GT : LE;
2262 break;
2264 case GTU:
2265 case LEU:
2266 if (i != ~((unsigned HOST_WIDE_INT) 0)
2267 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2269 *op1 = GEN_INT (i + 1);
2270 return code == GTU ? GEU : LTU;
2272 break;
2274 case GEU:
2275 case LTU:
2276 if (i != 0
2277 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2279 *op1 = GEN_INT (i - 1);
2280 return code == GEU ? GTU : LEU;
2282 break;
2284 default:
2285 abort ();
2288 return code;
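/* Example: for "x <= 0xfff" (LE), 0xfff is not a valid ARM immediate
   but 0x1000 is, so the comparison is rewritten as "x < 0x1000" (LT)
   and the extra constant-load disappears.  */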
2292 /* Define how to find the value returned by a function. */
2295 rtx arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2297 enum machine_mode mode;
2298 int unsignedp ATTRIBUTE_UNUSED;
2299 rtx r ATTRIBUTE_UNUSED;
2302 mode = TYPE_MODE (type);
2303 /* Promote integer types. */
2304 if (INTEGRAL_TYPE_P (type))
2305 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2306 return LIBCALL_VALUE(mode);
2309 /* Determine the amount of memory needed to store the possible return
2310 registers of an untyped call. */
2312 int arm_apply_result_size (void)
2314 int size = 16;
2316 if (TARGET_ARM)
2318 if (TARGET_HARD_FLOAT_ABI)
2320 if (TARGET_FPA)
2321 size += 12;
2322 if (TARGET_MAVERICK)
2323 size += 8;
2325 if (TARGET_IWMMXT_ABI)
2326 size += 8;
2329 return size;
2332 /* Decide whether a type should be returned in memory (true)
2333 or in a register (false). This is called by the macro
2334 RETURN_IN_MEMORY. */
2336 int arm_return_in_memory (tree type)
2338 HOST_WIDE_INT size;
2340 if (!AGGREGATE_TYPE_P (type) &&
2341 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2342 /* All simple types are returned in registers.
2343 For AAPCS, complex types are treated the same as aggregates. */
2344 return 0;
2346 size = int_size_in_bytes (type);
2348 if (arm_abi != ARM_ABI_APCS)
2350 /* ATPCS and later return aggregate types in memory only if they are
2351 larger than a word (or are variable size). */
2352 return (size < 0 || size > UNITS_PER_WORD);
2355 /* For the arm-wince targets we choose to be compatible with Microsoft's
2356 ARM and Thumb compilers, which always return aggregates in memory. */
2357 #ifndef ARM_WINCE
2358 /* All structures/unions bigger than one word are returned in memory.
2359 Also catch the case where int_size_in_bytes returns -1. In this case
2360 the aggregate is either huge or of variable size, and in either case
2361 we will want to return it via memory and not in a register. */
2362 if (size < 0 || size > UNITS_PER_WORD)
2363 return 1;
2365 if (TREE_CODE (type) == RECORD_TYPE)
2367 tree field;
2369 /* For a struct the APCS says that we only return in a register
2370 if the type is 'integer like' and every addressable element
2371 has an offset of zero. For practical purposes this means
2372 that the structure can have at most one non bit-field element
2373 and that this element must be the first one in the structure. */
2375 /* Find the first field, ignoring non FIELD_DECL things which will
2376 have been created by C++. */
2377 for (field = TYPE_FIELDS (type);
2378 field && TREE_CODE (field) != FIELD_DECL;
2379 field = TREE_CHAIN (field))
2380 continue;
2382 if (field == NULL)
2383 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2385 /* Check that the first field is valid for returning in a register. */
2387 /* ... Floats are not allowed */
2388 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2389 return 1;
2391 /* ... Aggregates that are not themselves valid for returning in
2392 a register are not allowed. */
2393 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2394 return 1;
2396 /* Now check the remaining fields, if any. Only bitfields are allowed,
2397 since they are not addressable. */
2398 for (field = TREE_CHAIN (field);
2399 field;
2400 field = TREE_CHAIN (field))
2402 if (TREE_CODE (field) != FIELD_DECL)
2403 continue;
2405 if (!DECL_BIT_FIELD_TYPE (field))
2406 return 1;
2409 return 0;
2412 if (TREE_CODE (type) == UNION_TYPE)
2414 tree field;
2416 /* Unions can be returned in registers if every element is
2417 integral, or can be returned in an integer register. */
2418 for (field = TYPE_FIELDS (type);
2419 field;
2420 field = TREE_CHAIN (field))
2422 if (TREE_CODE (field) != FIELD_DECL)
2423 continue;
2425 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2426 return 1;
2428 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2429 return 1;
2432 return 0;
2434 #endif /* not ARM_WINCE */
2436 /* Return all other types in memory. */
2437 return 1;
2440 /* Indicate whether or not words of a double are in big-endian order. */
2443 int arm_float_words_big_endian (void)
2445 if (TARGET_MAVERICK)
2446 return 0;
2448 /* For FPA, float words are always big-endian. For VFP, floats words
2449 follow the memory system mode. */
2451 if (TARGET_FPA)
2453 return 1;
2456 if (TARGET_VFP)
2457 return (TARGET_BIG_END ? 1 : 0);
2459 return 1;
2462 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2463 for a call to a function whose data type is FNTYPE.
2464 For a library call, FNTYPE is NULL. */
2465 void
2466 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2467 rtx libname ATTRIBUTE_UNUSED,
2468 tree fndecl ATTRIBUTE_UNUSED)
2470 /* On the ARM, the offset starts at 0. */
2471 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2472 pcum->iwmmxt_nregs = 0;
2473 pcum->can_split = true;
2475 pcum->call_cookie = CALL_NORMAL;
2477 if (TARGET_LONG_CALLS)
2478 pcum->call_cookie = CALL_LONG;
2480 /* Check for long call/short call attributes. The attributes
2481 override any command line option. */
2482 if (fntype)
2484 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2485 pcum->call_cookie = CALL_SHORT;
2486 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2487 pcum->call_cookie = CALL_LONG;
2490 /* Varargs vectors are treated the same as long long.
2491 named_count avoids having to change the way arm handles 'named' arguments. */
2492 pcum->named_count = 0;
2493 pcum->nargs = 0;
2495 if (TARGET_REALLY_IWMMXT && fntype)
2497 tree fn_arg;
2499 for (fn_arg = TYPE_ARG_TYPES (fntype);
2500 fn_arg;
2501 fn_arg = TREE_CHAIN (fn_arg))
2502 pcum->named_count += 1;
2504 if (! pcum->named_count)
2505 pcum->named_count = INT_MAX;
2510 /* Return true if mode/type need doubleword alignment. */
2511 bool
2512 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2514 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2515 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2519 /* Determine where to put an argument to a function.
2520 Value is zero to push the argument on the stack,
2521 or a hard register in which to store the argument.
2523 MODE is the argument's machine mode.
2524 TYPE is the data type of the argument (as a tree).
2525 This is null for libcalls where that information may
2526 not be available.
2527 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2528 the preceding args and about the function being called.
2529 NAMED is nonzero if this argument is a named parameter
2530 (otherwise it is an extra parameter matching an ellipsis). */
2533 rtx arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2534 tree type, int named)
2536 int nregs;
2538 /* Varargs vectors are treated the same as long long.
2539 named_count avoids having to change the way arm handles 'named' arguments. */
2540 if (TARGET_IWMMXT_ABI
2541 && arm_vector_mode_supported_p (mode)
2542 && pcum->named_count > pcum->nargs + 1)
2544 if (pcum->iwmmxt_nregs <= 9)
2545 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2546 else
2548 pcum->can_split = false;
2549 return NULL_RTX;
2553 /* Put doubleword aligned quantities in even register pairs. */
2554 if (pcum->nregs & 1
2555 && ARM_DOUBLEWORD_ALIGN
2556 && arm_needs_doubleword_align (mode, type))
2557 pcum->nregs++;
2559 if (mode == VOIDmode)
2560 /* Compute operand 2 of the call insn. */
2561 return GEN_INT (pcum->call_cookie);
2563 /* Only allow splitting an arg between regs and memory if all preceding
2564 args were allocated to regs. For args passed by reference we only count
2565 the reference pointer. */
2566 if (pcum->can_split)
2567 nregs = 1;
2568 else
2569 nregs = ARM_NUM_REGS2 (mode, type);
2571 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2572 return NULL_RTX;
2574 return gen_rtx_REG (mode, pcum->nregs);
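/* For illustration, with ARM_DOUBLEWORD_ALIGN in effect: given
   "void f (int a, long long b)", A lands in r0; B needs an even
   register pair, so nregs is bumped from 1 to 2 and B occupies
   r2/r3, leaving r1 unused.  */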
2577 static int
2578 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2579 tree type, bool named ATTRIBUTE_UNUSED)
2581 int nregs = pcum->nregs;
2583 if (arm_vector_mode_supported_p (mode))
2584 return 0;
2586 if (NUM_ARG_REGS > nregs
2587 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2588 && pcum->can_split)
2589 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2591 return 0;
2594 /* Variable sized types are passed by reference. This is a GCC
2595 extension to the ARM ABI. */
2597 static bool
2598 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2599 enum machine_mode mode ATTRIBUTE_UNUSED,
2600 tree type, bool named ATTRIBUTE_UNUSED)
2602 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2605 /* Encode the current state of the #pragma [no_]long_calls. */
2606 typedef enum
2608 OFF, /* No #pragma [no_]long_calls is in effect. */
2609 LONG, /* #pragma long_calls is in effect. */
2610 SHORT /* #pragma no_long_calls is in effect. */
2611 } arm_pragma_enum;
2613 static arm_pragma_enum arm_pragma_long_calls = OFF;
2615 void
2616 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2618 arm_pragma_long_calls = LONG;
2621 void
2622 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2624 arm_pragma_long_calls = SHORT;
2627 void
2628 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2630 arm_pragma_long_calls = OFF;
2633 /* Table of machine attributes. */
2634 const struct attribute_spec arm_attribute_table[] =
2636 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2637 /* Function calls made to this symbol must be done indirectly, because
2638 it may lie outside of the 26 bit addressing range of a normal function
2639 call. */
2640 { "long_call", 0, 0, false, true, true, NULL },
2641 /* Whereas these functions are always known to reside within the 26 bit
2642 addressing range. */
2643 { "short_call", 0, 0, false, true, true, NULL },
2644 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2645 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2646 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2647 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2648 #ifdef ARM_PE
2649 /* ARM/PE has three new attributes:
2650 interfacearm - ?
2651 dllexport - for exporting a function/variable that will live in a dll
2652 dllimport - for importing a function/variable from a dll
2654 Microsoft allows multiple declspecs in one __declspec, separating
2655 them with spaces. We do NOT support this. Instead, use __declspec
2656 multiple times.
2658 { "dllimport", 0, 0, true, false, false, NULL },
2659 { "dllexport", 0, 0, true, false, false, NULL },
2660 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2661 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2662 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2663 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2664 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2665 #endif
2666 { NULL, 0, 0, false, false, false, NULL }
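/* Typical uses of the attributes and pragmas declared above
   (illustrative):

       void far_func (void) __attribute__ ((long_call));
       void isr_func (void) __attribute__ ((interrupt ("IRQ")));

       #pragma long_calls
       ...                      -- functions here default to long_call
       #pragma long_calls_off  */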
2669 /* Handle an attribute requiring a FUNCTION_DECL;
2670 arguments as in struct attribute_spec.handler. */
2671 static tree
2672 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2673 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2675 if (TREE_CODE (*node) != FUNCTION_DECL)
2677 warning (0, "%qs attribute only applies to functions",
2678 IDENTIFIER_POINTER (name));
2679 *no_add_attrs = true;
2682 return NULL_TREE;
2685 /* Handle an "interrupt" or "isr" attribute;
2686 arguments as in struct attribute_spec.handler. */
2687 static tree
2688 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2689 bool *no_add_attrs)
2691 if (DECL_P (*node))
2693 if (TREE_CODE (*node) != FUNCTION_DECL)
2695 warning (0, "%qs attribute only applies to functions",
2696 IDENTIFIER_POINTER (name));
2697 *no_add_attrs = true;
2699 /* FIXME: the argument if any is checked for type attributes;
2700 should it be checked for decl ones? */
2702 else
2704 if (TREE_CODE (*node) == FUNCTION_TYPE
2705 || TREE_CODE (*node) == METHOD_TYPE)
2707 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2709 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2710 *no_add_attrs = true;
2713 else if (TREE_CODE (*node) == POINTER_TYPE
2714 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2715 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2716 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2718 *node = build_variant_type_copy (*node);
2719 TREE_TYPE (*node) = build_type_attribute_variant
2720 (TREE_TYPE (*node),
2721 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2722 *no_add_attrs = true;
2724 else
2726 /* Possibly pass this attribute on from the type to a decl. */
2727 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2728 | (int) ATTR_FLAG_FUNCTION_NEXT
2729 | (int) ATTR_FLAG_ARRAY_NEXT))
2731 *no_add_attrs = true;
2732 return tree_cons (name, args, NULL_TREE);
2734 else
2736 warning (0, "%qs attribute ignored", IDENTIFIER_POINTER (name));
2741 return NULL_TREE;
2744 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2745 /* Handle the "notshared" attribute. This attribute is another way of
2746 requesting hidden visibility. ARM's compiler supports
2747 "__declspec(notshared)"; we support the same thing via an
2748 attribute. */
2750 static tree
2751 arm_handle_notshared_attribute (tree *node,
2752 tree name ATTRIBUTE_UNUSED,
2753 tree args ATTRIBUTE_UNUSED,
2754 int flags ATTRIBUTE_UNUSED,
2755 bool *no_add_attrs)
2757 tree decl = TYPE_NAME (*node);
2759 if (decl)
2761 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2762 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2763 *no_add_attrs = false;
2765 return NULL_TREE;
2767 #endif
2769 /* Return 0 if the attributes for two types are incompatible, 1 if they
2770 are compatible, and 2 if they are nearly compatible (which causes a
2771 warning to be generated). */
2772 static int
2773 arm_comp_type_attributes (tree type1, tree type2)
2775 int l1, l2, s1, s2;
2777 /* Check for mismatch of non-default calling convention. */
2778 if (TREE_CODE (type1) != FUNCTION_TYPE)
2779 return 1;
2781 /* Check for mismatched call attributes. */
2782 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2783 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2784 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2785 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2787 /* Only bother to check if an attribute is defined. */
2788 if (l1 | l2 | s1 | s2)
2790 /* If one type has an attribute, the other must have the same attribute. */
2791 if ((l1 != l2) || (s1 != s2))
2792 return 0;
2794 /* Disallow mixed attributes. */
2795 if ((l1 & s2) || (l2 & s1))
2796 return 0;
2799 /* Check for mismatched ISR attribute. */
2800 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2801 if (! l1)
2802 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2803 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2804 if (! l2)
2805 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2806 if (l1 != l2)
2807 return 0;
2809 return 1;
2812 /* Encode long_call or short_call attribute by prefixing
2813 symbol name in DECL with a special character FLAG. */
2814 void
2815 arm_encode_call_attribute (tree decl, int flag)
2817 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2818 int len = strlen (str);
2819 char * newstr;
2821 /* Do not allow weak functions to be treated as short call. */
2822 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2823 return;
2825 newstr = alloca (len + 2);
2826 newstr[0] = flag;
2827 strcpy (newstr + 1, str);
2829 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2830 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
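/* Illustrative, assuming the usual flag characters from arm.h: a
   short_call function "foo" has its assembler symbol rewritten here
   to "^foo" (a long_call one to "#foo"); the prefix is recognized by
   the ENCODED_*_CALL_ATTR_P tests and stripped before final output.  */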
2833 /* Assigns default attributes to newly defined type. This is used to
2834 set short_call/long_call attributes for function types of
2835 functions defined inside corresponding #pragma scopes. */
2836 static void
2837 arm_set_default_type_attributes (tree type)
2839 /* Add __attribute__ ((long_call)) to all functions when inside
2840 #pragma long_calls, or __attribute__ ((short_call)) when inside
2841 #pragma no_long_calls. */
2842 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2844 tree type_attr_list, attr_name;
2845 type_attr_list = TYPE_ATTRIBUTES (type);
2847 if (arm_pragma_long_calls == LONG)
2848 attr_name = get_identifier ("long_call");
2849 else if (arm_pragma_long_calls == SHORT)
2850 attr_name = get_identifier ("short_call");
2851 else
2852 return;
2854 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2855 TYPE_ATTRIBUTES (type) = type_attr_list;
2859 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2860 defined within the current compilation unit. If this cannot be
2861 determined, then 0 is returned. */
2862 static int
2863 current_file_function_operand (rtx sym_ref)
2865 /* This is a bit of a fib. A function will have a short call flag
2866 applied to its name if it has the short call attribute, or it has
2867 already been defined within the current compilation unit. */
2868 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2869 return 1;
2871 /* The current function is always defined within the current compilation
2872 unit. If it is a weak definition, however, then this may not be the real
2873 definition of the function, and so we have to say no. */
2874 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2875 && !DECL_WEAK (current_function_decl))
2876 return 1;
2878 /* We cannot make the determination - default to returning 0. */
2879 return 0;
2882 /* Return nonzero if a 32 bit "long_call" should be generated for
2883 this call. We generate a long_call if the function:
2885 a. has an __attribute__ ((long_call))
2886 or b. is within the scope of a #pragma long_calls
2887 or c. the -mlong-calls command line switch has been specified,
2888 and either:
2889 1. -ffunction-sections is in effect
2890 or 2. the current function has __attribute__ ((section))
2891 or 3. the target function has __attribute__ ((section))
2893 However we do not generate a long call if the function:
2895 d. has an __attribute__ ((short_call))
2896 or e. is inside the scope of a #pragma no_long_calls
2897 or f. is defined within the current compilation unit.
2899 This function will be called by C fragments contained in the machine
2900 description file. SYM_REF and CALL_COOKIE correspond to the matched
2901 rtl operands. CALL_SYMBOL is used to distinguish between
2902 two different callers of the function. It is set to 1 in the
2903 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2904 and "call_value" patterns. This is because of the difference in the
2905 SYM_REFs passed by these patterns. */
2907 int arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2909 if (!call_symbol)
2911 if (GET_CODE (sym_ref) != MEM)
2912 return 0;
2914 sym_ref = XEXP (sym_ref, 0);
2917 if (GET_CODE (sym_ref) != SYMBOL_REF)
2918 return 0;
2920 if (call_cookie & CALL_SHORT)
2921 return 0;
2923 if (TARGET_LONG_CALLS)
2925 if (flag_function_sections
2926 || DECL_SECTION_NAME (current_function_decl))
2927 /* c.3 is handled by the definition of the
2928 ARM_DECLARE_FUNCTION_SIZE macro. */
2929 return 1;
2932 if (current_file_function_operand (sym_ref))
2933 return 0;
2935 return (call_cookie & CALL_LONG)
2936 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2937 || TARGET_LONG_CALLS;
2940 /* Return nonzero if it is ok to make a tail-call to DECL. */
2941 static bool
2942 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2944 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2946 if (cfun->machine->sibcall_blocked)
2947 return false;
2949 /* Never tailcall something for which we have no decl, or if we
2950 are in Thumb mode. */
2951 if (decl == NULL || TARGET_THUMB)
2952 return false;
2954 /* Get the calling method. */
2955 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2956 call_type = CALL_SHORT;
2957 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2958 call_type = CALL_LONG;
2960 /* Cannot tail-call to long calls, since these are out of range of
2961 a branch instruction. However, if not compiling PIC, we know
2962 we can reach the symbol if it is in this compilation unit. */
2963 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2964 return false;
2966 /* If we are interworking and the function is not declared static
2967 then we can't tail-call it unless we know that it exists in this
2968 compilation unit (since it might be a Thumb routine). */
2969 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2970 return false;
2972 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2973 if (IS_INTERRUPT (arm_current_func_type ()))
2974 return false;
2976 /* Everything else is ok. */
2977 return true;
2981 /* Addressing mode support functions. */
2983 /* Return nonzero if X is a legitimate immediate operand when compiling
2984 for PIC. */
2986 int legitimate_pic_operand_p (rtx x)
2988 if (CONSTANT_P (x)
2989 && flag_pic
2990 && (GET_CODE (x) == SYMBOL_REF
2991 || (GET_CODE (x) == CONST
2992 && GET_CODE (XEXP (x, 0)) == PLUS
2993 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2994 return 0;
2996 return 1;
3000 rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3002 if (GET_CODE (orig) == SYMBOL_REF
3003 || GET_CODE (orig) == LABEL_REF)
3005 #ifndef AOF_ASSEMBLER
3006 rtx pic_ref, address;
3007 #endif
3008 rtx insn;
3009 int subregs = 0;
3011 if (reg == 0)
3013 if (no_new_pseudos)
3014 abort ();
3015 else
3016 reg = gen_reg_rtx (Pmode);
3018 subregs = 1;
3021 #ifdef AOF_ASSEMBLER
3022 /* The AOF assembler can generate relocations for these directly, and
3023 understands that the PIC register has to be added into the offset. */
3024 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3025 #else
3026 if (subregs)
3027 address = gen_reg_rtx (Pmode);
3028 else
3029 address = reg;
3031 if (TARGET_ARM)
3032 emit_insn (gen_pic_load_addr_arm (address, orig));
3033 else
3034 emit_insn (gen_pic_load_addr_thumb (address, orig));
3036 if ((GET_CODE (orig) == LABEL_REF
3037 || (GET_CODE (orig) == SYMBOL_REF &&
3038 SYMBOL_REF_LOCAL_P (orig)))
3039 && NEED_GOT_RELOC)
3040 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3041 else
3043 pic_ref = gen_const_mem (Pmode,
3044 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3045 address));
3048 insn = emit_move_insn (reg, pic_ref);
3049 #endif
3050 current_function_uses_pic_offset_table = 1;
3051 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3052 by loop. */
3053 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3054 REG_NOTES (insn));
3055 return reg;
3057 else if (GET_CODE (orig) == CONST)
3059 rtx base, offset;
3061 if (GET_CODE (XEXP (orig, 0)) == PLUS
3062 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3063 return orig;
3065 if (reg == 0)
3067 if (no_new_pseudos)
3068 abort ();
3069 else
3070 reg = gen_reg_rtx (Pmode);
3073 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3075 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3076 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3077 base == reg ? 0 : reg);
3079 else
3080 abort ();
3082 if (GET_CODE (offset) == CONST_INT)
3084 /* The base register doesn't really matter, we only want to
3085 test the index for the appropriate mode. */
3086 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3088 if (!no_new_pseudos)
3089 offset = force_reg (Pmode, offset);
3090 else
3091 abort ();
3094 if (GET_CODE (offset) == CONST_INT)
3095 return plus_constant (base, INTVAL (offset));
3098 if (GET_MODE_SIZE (mode) > 4
3099 && (GET_MODE_CLASS (mode) == MODE_INT
3100 || TARGET_SOFT_FLOAT))
3102 emit_insn (gen_addsi3 (reg, base, offset));
3103 return reg;
3106 return gen_rtx_PLUS (Pmode, base, offset);
3109 return orig;
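/* Illustrative expansion for a global symbol "x" on the non-AOF path:

       ldr     rT, .Lgotoff      @ GOT-relative offset of x
       ldr     reg, [rPIC, rT]   @ fetch &x from the GOT

   A local symbol or label skips the GOT load and is simply added to
   the PIC base register.  */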
3113 /* Find a spare low register to use during the prolog of a function. */
3115 static int
3116 thumb_find_work_register (unsigned long pushed_regs_mask)
3118 int reg;
3120 /* Check the argument registers first as these are call-used. The
3121 register allocation order means that sometimes r3 might be used
3122 but earlier argument registers might not, so check them all. */
3123 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3124 if (!regs_ever_live[reg])
3125 return reg;
3127 /* Before going on to check the call-saved registers we can try a couple
3128 more ways of deducing that r3 is available. The first is when we are
3129 pushing anonymous arguments onto the stack and we have fewer than 4
3130 registers' worth of fixed arguments (*). In this case r3 will be part of
3131 the variable argument list and so we can be sure that it will be
3132 pushed right at the start of the function. Hence it will be available
3133 for the rest of the prologue.
3134 (*): i.e. current_function_pretend_args_size is greater than 0. */
3135 if (cfun->machine->uses_anonymous_args
3136 && current_function_pretend_args_size > 0)
3137 return LAST_ARG_REGNUM;
3139 /* The other case is when we have fixed arguments but fewer than 4 registers'
3140 worth. In this case r3 might be used in the body of the function, but
3141 it is not being used to convey an argument into the function. In theory
3142 we could just check current_function_args_size to see how many bytes are
3143 being passed in argument registers, but it seems that it is unreliable.
3144 Sometimes it will have the value 0 when in fact arguments are being
3145 passed. (See testcase execute/20021111-1.c for an example). So we also
3146 check the args_info.nregs field as well. The problem with this field is
3147 that it makes no allowances for arguments that are passed to the
3148 function but which are not used. Hence we could miss an opportunity
3149 when a function has an unused argument in r3. But it is better to be
3150 safe than to be sorry. */
3151 if (! cfun->machine->uses_anonymous_args
3152 && current_function_args_size >= 0
3153 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3154 && cfun->args_info.nregs < 4)
3155 return LAST_ARG_REGNUM;
3157 /* Otherwise look for a call-saved register that is going to be pushed. */
3158 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3159 if (pushed_regs_mask & (1 << reg))
3160 return reg;
3162 /* Something went wrong - thumb_compute_save_reg_mask()
3163 should have arranged for a suitable register to be pushed. */
3164 abort ();
3168 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3169 low register. */
3171 void
3172 arm_load_pic_register (unsigned int scratch)
3174 #ifndef AOF_ASSEMBLER
3175 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3176 rtx global_offset_table;
3178 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3179 return;
3181 if (!flag_pic)
3182 abort ();
3184 l1 = gen_label_rtx ();
3186 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3187 /* On the ARM the PC register contains 'dot + 8' at the time of the
3188 addition, on the Thumb it is 'dot + 4'. */
3189 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3190 if (GOT_PCREL)
3191 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3192 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3193 else
3194 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3196 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3198 if (TARGET_ARM)
3200 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3201 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3203 else
3205 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3207 /* We will have pushed the pic register, so should always be
3208 able to find a work register. */
3209 pic_tmp = gen_rtx_REG (SImode, scratch);
3210 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3211 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3213 else
3214 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3215 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3218 /* Need to emit this whether or not we obey regdecls,
3219 since setjmp/longjmp can cause life info to screw up. */
3220 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3221 #endif /* AOF_ASSEMBLER */
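/* The resulting prologue fragment in ARM mode looks roughly like:

       ldr     rPIC, .LCP        @ _GLOBAL_OFFSET_TABLE_ - (.LPIC + 8)
     .LPIC:
       add     rPIC, pc, rPIC    @ pc reads as .LPIC + 8 here

   hence the +8 above for ARM and +4 for Thumb.  */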
3225 /* Return nonzero if X is valid as an ARM state addressing register. */
3226 static int
3227 arm_address_register_rtx_p (rtx x, int strict_p)
3229 int regno;
3231 if (GET_CODE (x) != REG)
3232 return 0;
3234 regno = REGNO (x);
3236 if (strict_p)
3237 return ARM_REGNO_OK_FOR_BASE_P (regno);
3239 return (regno <= LAST_ARM_REGNUM
3240 || regno >= FIRST_PSEUDO_REGISTER
3241 || regno == FRAME_POINTER_REGNUM
3242 || regno == ARG_POINTER_REGNUM);
3245 /* Return nonzero if X is a valid ARM state address operand. */
3247 int arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3248 int strict_p)
3250 bool use_ldrd;
3251 enum rtx_code code = GET_CODE (x);
3253 if (arm_address_register_rtx_p (x, strict_p))
3254 return 1;
3256 use_ldrd = (TARGET_LDRD
3257 && (mode == DImode
3258 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3260 if (code == POST_INC || code == PRE_DEC
3261 || ((code == PRE_INC || code == POST_DEC)
3262 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3263 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3265 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3266 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3267 && GET_CODE (XEXP (x, 1)) == PLUS
3268 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3270 rtx addend = XEXP (XEXP (x, 1), 1);
3272 /* Don't allow ldrd post-increment by register, because it's hard
3273 to fix up invalid register choices. */
3274 if (use_ldrd
3275 && GET_CODE (x) == POST_MODIFY
3276 && GET_CODE (addend) == REG)
3277 return 0;
3279 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3280 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3283 /* After reload constants split into minipools will have addresses
3284 from a LABEL_REF. */
3285 else if (reload_completed
3286 && (code == LABEL_REF
3287 || (code == CONST
3288 && GET_CODE (XEXP (x, 0)) == PLUS
3289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3291 return 1;
3293 else if (mode == TImode)
3294 return 0;
3296 else if (code == PLUS)
3298 rtx xop0 = XEXP (x, 0);
3299 rtx xop1 = XEXP (x, 1);
3301 return ((arm_address_register_rtx_p (xop0, strict_p)
3302 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3303 || (arm_address_register_rtx_p (xop1, strict_p)
3304 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3307 #if 0
3308 /* Reload currently can't handle MINUS, so disable this for now */
3309 else if (GET_CODE (x) == MINUS)
3311 rtx xop0 = XEXP (x, 0);
3312 rtx xop1 = XEXP (x, 1);
3314 return (arm_address_register_rtx_p (xop0, strict_p)
3315 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3317 #endif
3319 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3320 && code == SYMBOL_REF
3321 && CONSTANT_POOL_ADDRESS_P (x)
3322 && ! (flag_pic
3323 && symbol_mentioned_p (get_pool_constant (x))))
3324 return 1;
3326 return 0;
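/* Examples of ARM addresses accepted above (illustrative):

       [r0]               plain base register
       [r0, #-4]          base plus immediate index
       [r0, r1]           base plus register index
       [r0, r1, lsl #2]   base plus scaled index (the MULT/shift forms)
       [r0], #4           post-increment
       [r0, #4]!          pre-increment (PRE_MODIFY)  */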
3329 /* Return nonzero if INDEX is valid for an address index operand in
3330 ARM state. */
3331 static int
3332 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3333 int strict_p)
3335 HOST_WIDE_INT range;
3336 enum rtx_code code = GET_CODE (index);
3338 /* Standard coprocessor addressing modes. */
3339 if (TARGET_HARD_FLOAT
3340 && (TARGET_FPA || TARGET_MAVERICK)
3341 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3342 || (TARGET_MAVERICK && mode == DImode)))
3343 return (code == CONST_INT && INTVAL (index) < 1024
3344 && INTVAL (index) > -1024
3345 && (INTVAL (index) & 3) == 0);
3347 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3348 return (code == CONST_INT
3349 && INTVAL (index) < 1024
3350 && INTVAL (index) > -1024
3351 && (INTVAL (index) & 3) == 0);
3353 if (arm_address_register_rtx_p (index, strict_p)
3354 && (GET_MODE_SIZE (mode) <= 4))
3355 return 1;
3357 if (mode == DImode || mode == DFmode)
3359 if (code == CONST_INT)
3361 HOST_WIDE_INT val = INTVAL (index);
3363 if (TARGET_LDRD)
3364 return val > -256 && val < 256;
3365 else
3366 return val > -4096 && val < 4092;
3369 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3372 if (GET_MODE_SIZE (mode) <= 4
3373 && ! (arm_arch4
3374 && (mode == HImode
3375 || (mode == QImode && outer == SIGN_EXTEND))))
3377 if (code == MULT)
3379 rtx xiop0 = XEXP (index, 0);
3380 rtx xiop1 = XEXP (index, 1);
3382 return ((arm_address_register_rtx_p (xiop0, strict_p)
3383 && power_of_two_operand (xiop1, SImode))
3384 || (arm_address_register_rtx_p (xiop1, strict_p)
3385 && power_of_two_operand (xiop0, SImode)));
3387 else if (code == LSHIFTRT || code == ASHIFTRT
3388 || code == ASHIFT || code == ROTATERT)
3390 rtx op = XEXP (index, 1);
3392 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3393 && GET_CODE (op) == CONST_INT
3394 && INTVAL (op) > 0
3395 && INTVAL (op) <= 31);
3399 /* For ARM v4 we may be doing a sign-extend operation during the
3400 load. */
3401 if (arm_arch4)
3403 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3404 range = 256;
3405 else
3406 range = 4096;
3408 else
3409 range = (mode == HImode) ? 4095 : 4096;
3411 return (code == CONST_INT
3412 && INTVAL (index) < range
3413 && INTVAL (index) > -range);
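/* Rough summary of the immediate ranges accepted above:

       FPA/Maverick float, iWMMXt    -1020..+1020, multiple of 4
       DImode/DFmode with ldrd        -255..+255
       DImode/DFmode without ldrd    -4095..+4091
       HImode etc. on arm_arch4       -255..+255
       SImode/QImode (ldr/ldrb)      -4095..+4095  */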
3416 /* Return nonzero if X is valid as a Thumb state base register. */
3417 static int
3418 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3420 int regno;
3422 if (GET_CODE (x) != REG)
3423 return 0;
3425 regno = REGNO (x);
3427 if (strict_p)
3428 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3430 return (regno <= LAST_LO_REGNUM
3431 || regno > LAST_VIRTUAL_REGISTER
3432 || regno == FRAME_POINTER_REGNUM
3433 || (GET_MODE_SIZE (mode) >= 4
3434 && (regno == STACK_POINTER_REGNUM
3435 || regno >= FIRST_PSEUDO_REGISTER
3436 || x == hard_frame_pointer_rtx
3437 || x == arg_pointer_rtx)));
3440 /* Return nonzero if x is a legitimate index register. This is the case
3441 for any base register that can access a QImode object. */
3442 inline static int
3443 thumb_index_register_rtx_p (rtx x, int strict_p)
3445 return thumb_base_register_rtx_p (x, QImode, strict_p);
3448 /* Return nonzero if x is a legitimate Thumb-state address.
3450 The AP may be eliminated to either the SP or the FP, so we use the
3451 least common denominator, e.g. SImode, and offsets from 0 to 64.
3453 ??? Verify whether the above is the right approach.
3455 ??? Also, the FP may be eliminated to the SP, so perhaps that
3456 needs special handling also.
3458 ??? Look at how the mips16 port solves this problem. It probably uses
3459 better ways to solve some of these problems.
3461 Although it is not incorrect, we don't accept QImode and HImode
3462 addresses based on the frame pointer or arg pointer until the
3463 reload pass starts. This is so that eliminating such addresses
3464 into stack based ones won't produce impossible code. */
3466 int thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3468 /* ??? Not clear if this is right. Experiment. */
3469 if (GET_MODE_SIZE (mode) < 4
3470 && !(reload_in_progress || reload_completed)
3471 && (reg_mentioned_p (frame_pointer_rtx, x)
3472 || reg_mentioned_p (arg_pointer_rtx, x)
3473 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3474 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3475 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3476 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3477 return 0;
3479 /* Accept any base register. SP only in SImode or larger. */
3480 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3481 return 1;
3483 /* This is PC relative data before arm_reorg runs. */
3484 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3485 && GET_CODE (x) == SYMBOL_REF
3486 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3487 return 1;
3489 /* This is PC relative data after arm_reorg runs. */
3490 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3491 && (GET_CODE (x) == LABEL_REF
3492 || (GET_CODE (x) == CONST
3493 && GET_CODE (XEXP (x, 0)) == PLUS
3494 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3495 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3496 return 1;
3498 /* Post-inc indexing only supported for SImode and larger. */
3499 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3500 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3501 return 1;
3503 else if (GET_CODE (x) == PLUS)
3505 /* REG+REG address can be any two index registers. */
3506 /* We disallow FRAME+REG addressing since we know that FRAME
3507 will be replaced with STACK, and SP relative addressing only
3508 permits SP+OFFSET. */
3509 if (GET_MODE_SIZE (mode) <= 4
3510 && XEXP (x, 0) != frame_pointer_rtx
3511 && XEXP (x, 1) != frame_pointer_rtx
3512 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3513 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3514 return 1;
3516 /* REG+const has 5-7 bit offset for non-SP registers. */
3517 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3518 || XEXP (x, 0) == arg_pointer_rtx)
3519 && GET_CODE (XEXP (x, 1)) == CONST_INT
3520 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3521 return 1;
3523 /* REG+const has 10 bit offset for SP, but only SImode and
3524 larger is supported. */
3525 /* ??? Should probably check for DI/DFmode overflow here
3526 just like GO_IF_LEGITIMATE_OFFSET does. */
3527 else if (GET_CODE (XEXP (x, 0)) == REG
3528 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3529 && GET_MODE_SIZE (mode) >= 4
3530 && GET_CODE (XEXP (x, 1)) == CONST_INT
3531 && INTVAL (XEXP (x, 1)) >= 0
3532 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3533 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3534 return 1;
3536 else if (GET_CODE (XEXP (x, 0)) == REG
3537 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3538 && GET_MODE_SIZE (mode) >= 4
3539 && GET_CODE (XEXP (x, 1)) == CONST_INT
3540 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3541 return 1;
3544 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3545 && GET_MODE_SIZE (mode) == 4
3546 && GET_CODE (x) == SYMBOL_REF
3547 && CONSTANT_POOL_ADDRESS_P (x)
3548 && !(flag_pic
3549 && symbol_mentioned_p (get_pool_constant (x))))
3550 return 1;
3552 return 0;
3555 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3556 instruction of mode MODE. */
3558 int thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3560 switch (GET_MODE_SIZE (mode))
3562 case 1:
3563 return val >= 0 && val < 32;
3565 case 2:
3566 return val >= 0 && val < 64 && (val & 1) == 0;
3568 default:
3569 return (val >= 0
3570 && (val + GET_MODE_SIZE (mode)) <= 128
3571 && (val & 3) == 0);
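/* I.e. the Thumb load/store offset fields (illustrative):

       ldrb    r0, [r1, #31]     @ QImode: 0..31
       ldrh    r0, [r1, #62]     @ HImode: 0..62, even
       ldr     r0, [r1, #124]    @ SImode: 0..124, multiple of 4  */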
3575 /* Try machine-dependent ways of modifying an illegitimate address
3576 to be legitimate. If we find one, return the new, valid address. */
3578 rtx arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3580 if (GET_CODE (x) == PLUS)
3582 rtx xop0 = XEXP (x, 0);
3583 rtx xop1 = XEXP (x, 1);
3585 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3586 xop0 = force_reg (SImode, xop0);
3588 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3589 xop1 = force_reg (SImode, xop1);
3591 if (ARM_BASE_REGISTER_RTX_P (xop0)
3592 && GET_CODE (xop1) == CONST_INT)
3594 HOST_WIDE_INT n, low_n;
3595 rtx base_reg, val;
3596 n = INTVAL (xop1);
3598 /* VFP addressing modes actually allow greater offsets, but for
3599 now we just stick with the lowest common denominator. */
3600 if (mode == DImode
3601 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3603 low_n = n & 0x0f;
3604 n &= ~0x0f;
3605 if (low_n > 4)
3607 n += 16;
3608 low_n -= 16;
3611 else
3613 low_n = ((mode) == TImode ? 0
3614 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3615 n -= low_n;
3618 base_reg = gen_reg_rtx (SImode);
3619 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3620 GEN_INT (n)), NULL_RTX);
3621 emit_move_insn (base_reg, val);
3622 x = (low_n == 0 ? base_reg
3623 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3625 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3626 x = gen_rtx_PLUS (SImode, xop0, xop1);
3629 /* XXX We don't allow MINUS any more -- see comment in
3630 arm_legitimate_address_p (). */
3631 else if (GET_CODE (x) == MINUS)
3633 rtx xop0 = XEXP (x, 0);
3634 rtx xop1 = XEXP (x, 1);
3636 if (CONSTANT_P (xop0))
3637 xop0 = force_reg (SImode, xop0);
3639 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3640 xop1 = force_reg (SImode, xop1);
3642 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3643 x = gen_rtx_MINUS (SImode, xop0, xop1);
3646 if (flag_pic)
3648 /* We need to find and carefully transform any SYMBOL and LABEL
3649 references; so go back to the original address expression. */
3650 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3652 if (new_x != orig_x)
3653 x = new_x;
3656 return x;
3660 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3661 to be legitimate. If we find one, return the new, valid address. */
3663 rtx thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3665 if (GET_CODE (x) == PLUS
3666 && GET_CODE (XEXP (x, 1)) == CONST_INT
3667 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3668 || INTVAL (XEXP (x, 1)) < 0))
3670 rtx xop0 = XEXP (x, 0);
3671 rtx xop1 = XEXP (x, 1);
3672 HOST_WIDE_INT offset = INTVAL (xop1);
3674 /* Try and fold the offset into a biasing of the base register and
3675 then offsetting that. Don't do this when optimizing for space
3676 since it can cause too many CSEs. */
3677 if (optimize_size && offset >= 0
3678 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3680 HOST_WIDE_INT delta;
3682 if (offset >= 256)
3683 delta = offset - (256 - GET_MODE_SIZE (mode));
3684 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3685 delta = 31 * GET_MODE_SIZE (mode);
3686 else
3687 delta = offset & (~31 * GET_MODE_SIZE (mode));
3689 xop0 = force_operand (plus_constant (xop0, offset - delta),
3690 NULL_RTX);
3691 x = plus_constant (xop0, delta);
3693 else if (offset < 0 && offset > -256)
3694 /* Small negative offsets are best done with a subtract before the
3695 dereference; forcing these into a register normally takes two
3696 instructions. */
3697 x = force_operand (x, NULL_RTX);
3698 else
3700 /* For the remaining cases, force the constant into a register. */
3701 xop1 = force_reg (SImode, xop1);
3702 x = gen_rtx_PLUS (SImode, xop0, xop1);
3705 else if (GET_CODE (x) == PLUS
3706 && s_register_operand (XEXP (x, 1), SImode)
3707 && !s_register_operand (XEXP (x, 0), SImode))
3709 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3711 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3714 if (flag_pic)
3716 /* We need to find and carefully transform any SYMBOL and LABEL
3717 references; so go back to the original address expression. */
3718 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3720 if (new_x != orig_x)
3721 x = new_x;
3724 return x;
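/* Illustrative: a Thumb SImode load at "base + 200" overflows ldr's
   0..124 offset field, so the code above folds most of the offset
   into a biased base register and keeps a small residue as the
   immediate:

       add     rT, rBase, #bias
       ldr     rD, [rT, #residue]   @ bias + residue == 200  */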
3729 #define REG_OR_SUBREG_REG(X) \
3730 (GET_CODE (X) == REG \
3731 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3733 #define REG_OR_SUBREG_RTX(X) \
3734 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3736 #ifndef COSTS_N_INSNS
3737 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3738 #endif
3739 static inline int
3740 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3742 enum machine_mode mode = GET_MODE (x);
3744 switch (code)
3746 case ASHIFT:
3747 case ASHIFTRT:
3748 case LSHIFTRT:
3749 case ROTATERT:
3750 case PLUS:
3751 case MINUS:
3752 case COMPARE:
3753 case NEG:
3754 case NOT:
3755 return COSTS_N_INSNS (1);
3757 case MULT:
3758 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3760 int cycles = 0;
3761 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3763 while (i)
3765 i >>= 2;
3766 cycles++;
3768 return COSTS_N_INSNS (2) + cycles;
3770 return COSTS_N_INSNS (1) + 16;
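/* Worked example for the loop above (illustrative): a multiply by
   100 (binary 1100100) shifts I right two bits per iteration, going
   100 -> 25 -> 6 -> 1 -> 0, so CYCLES is 4 and the cost is
   COSTS_N_INSNS (2) + 4.  */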
3772 case SET:
3773 return (COSTS_N_INSNS (1)
3774 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3775 + (GET_CODE (SET_DEST (x)) == MEM)));
3777 case CONST_INT:
3778 if (outer == SET)
3780 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3781 return 0;
3782 if (thumb_shiftable_const (INTVAL (x)))
3783 return COSTS_N_INSNS (2);
3784 return COSTS_N_INSNS (3);
3786 else if ((outer == PLUS || outer == COMPARE)
3787 && INTVAL (x) < 256 && INTVAL (x) > -256)
3788 return 0;
3789 else if (outer == AND
3790 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3791 return COSTS_N_INSNS (1);
3792 else if (outer == ASHIFT || outer == ASHIFTRT
3793 || outer == LSHIFTRT)
3794 return 0;
3795 return COSTS_N_INSNS (2);
3797 case CONST:
3798 case CONST_DOUBLE:
3799 case LABEL_REF:
3800 case SYMBOL_REF:
3801 return COSTS_N_INSNS (3);
3803 case UDIV:
3804 case UMOD:
3805 case DIV:
3806 case MOD:
3807 return 100;
3809 case TRUNCATE:
3810 return 99;
3812 case AND:
3813 case XOR:
3814 case IOR:
3815 /* XXX guess. */
3816 return 8;
3818 case MEM:
3819 /* XXX another guess. */
3820 /* Memory costs quite a lot for the first word, but subsequent words
3821 load at the equivalent of a single insn each. */
3822 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3823 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3824 ? 4 : 0));
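/* For instance (illustrative figures): a one-word (SImode) access
   costs 10 under this formula, and a two-word (DImode) access
   10 + 4 = 14.  */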
3826 case IF_THEN_ELSE:
3827 /* XXX a guess. */
3828 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3829 return 14;
3830 return 2;
3832 case ZERO_EXTEND:
3833 /* XXX still guessing. */
3834 switch (GET_MODE (XEXP (x, 0)))
3836 case QImode:
3837 return (1 + (mode == DImode ? 4 : 0)
3838 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3840 case HImode:
3841 return (4 + (mode == DImode ? 4 : 0)
3842 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3844 case SImode:
3845 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3847 default:
3848 return 99;
3851 default:
3852 return 99;
3857 /* Worker routine for arm_rtx_costs. */
3858 static inline int
3859 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3861 enum machine_mode mode = GET_MODE (x);
3862 enum rtx_code subcode;
3863 int extra_cost;
3865 switch (code)
3867 case MEM:
3868 /* Memory costs quite a lot for the first word, but subsequent words
3869 load at the equivalent of a single insn each. */
3870 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3871 + (GET_CODE (x) == SYMBOL_REF
3872 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3874 case DIV:
3875 case MOD:
3876 case UDIV:
3877 case UMOD:
3878 return optimize_size ? COSTS_N_INSNS (2) : 100;
3880 case ROTATE:
3881 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3882 return 4;
3883 /* Fall through */
3884 case ROTATERT:
3885 if (mode != SImode)
3886 return 8;
3887 /* Fall through */
3888 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3889 if (mode == DImode)
3890 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3891 + ((GET_CODE (XEXP (x, 0)) == REG
3892 || (GET_CODE (XEXP (x, 0)) == SUBREG
3893 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3894 ? 0 : 8));
3895 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3896 || (GET_CODE (XEXP (x, 0)) == SUBREG
3897 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3898 ? 0 : 4)
3899 + ((GET_CODE (XEXP (x, 1)) == REG
3900 || (GET_CODE (XEXP (x, 1)) == SUBREG
3901 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3902 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3903 ? 0 : 4));
3905 case MINUS:
3906 if (mode == DImode)
3907 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3908 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3909 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3910 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3911 ? 0 : 8));
3913 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3914 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3915 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3916 && arm_const_double_rtx (XEXP (x, 1))))
3917 ? 0 : 8)
3918 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3919 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3920 && arm_const_double_rtx (XEXP (x, 0))))
3921 ? 0 : 8));
3923 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3924 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3925 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3926 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3927 || subcode == ASHIFTRT || subcode == LSHIFTRT
3928 || subcode == ROTATE || subcode == ROTATERT
3929 || (subcode == MULT
3930 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3931 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3932 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3933 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3934 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3935 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3936 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3937 return 1;
3938 /* Fall through */
3940 case PLUS:
3941 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3942 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3943 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3944 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3945 && arm_const_double_rtx (XEXP (x, 1))))
3946 ? 0 : 8));
3948 /* Fall through */
3949 case AND: case XOR: case IOR:
3950 extra_cost = 0;
3952 /* Normally the frame registers will be split into reg+const during
3953 reload, so it is a bad idea to combine them with other instructions,
3954 since then they might not be moved outside of loops. As a compromise
3955 we allow integration with ops that have a constant as their second
3956 operand. */
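/* Illustration of the compromise (not from the original comments):
   (plus (reg fp) (const_int -8)) is left cheap, since reload will
   recreate fp+const addresses anyway, whereas (plus (reg fp)
   (reg r1)) is charged EXTRA_COST to discourage combine from
   burying the frame register inside a compound operand.  */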
3957 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3958 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3959 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3960 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3961 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3962 extra_cost = 4;
3964 if (mode == DImode)
3965 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3966 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3967 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3968 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3969 ? 0 : 8));
3971 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3972 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3973 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3974 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3975 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3976 ? 0 : 4));
3978 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3979 return (1 + extra_cost
3980 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3981 || subcode == LSHIFTRT || subcode == ASHIFTRT
3982 || subcode == ROTATE || subcode == ROTATERT
3983 || (subcode == MULT
3984 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3985 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3986 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3987 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3988 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3989 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3990 ? 0 : 4));
3992 return 8;
3994 case MULT:
3995 /* This should have been handled by the CPU specific routines. */
3996 abort ();
3998 case TRUNCATE:
3999 if (arm_arch3m && mode == SImode
4000 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4001 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4002 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4003 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4004 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4005 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4006 return 8;
4007 return 99;
4009 case NEG:
4010 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4011 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4012 /* Fall through */
4013 case NOT:
4014 if (mode == DImode)
4015 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4017 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4019 case IF_THEN_ELSE:
4020 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4021 return 14;
4022 return 2;
4024 case COMPARE:
4025 return 1;
4027 case ABS:
4028 return 4 + (mode == DImode ? 4 : 0);
4030 case SIGN_EXTEND:
4031 if (GET_MODE (XEXP (x, 0)) == QImode)
4032 return (4 + (mode == DImode ? 4 : 0)
4033 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4034 /* Fall through */
4035 case ZERO_EXTEND:
4036 switch (GET_MODE (XEXP (x, 0)))
4038 case QImode:
4039 return (1 + (mode == DImode ? 4 : 0)
4040 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4042 case HImode:
4043 return (4 + (mode == DImode ? 4 : 0)
4044 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4046 case SImode:
4047 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4049 case V8QImode:
4050 case V4HImode:
4051 case V2SImode:
4052 case V4QImode:
4053 case V2HImode:
4054 return 1;
4056 default:
4057 break;
4059 abort ();
4061 case CONST_INT:
4062 if (const_ok_for_arm (INTVAL (x)))
4063 return outer == SET ? 2 : -1;
4064 else if (outer == AND
4065 && const_ok_for_arm (~INTVAL (x)))
4066 return -1;
4067 else if ((outer == COMPARE
4068 || outer == PLUS || outer == MINUS)
4069 && const_ok_for_arm (-INTVAL (x)))
4070 return -1;
4071 else
4072 return 5;
4074 case CONST:
4075 case LABEL_REF:
4076 case SYMBOL_REF:
4077 return 6;
4079 case CONST_DOUBLE:
4080 if (arm_const_double_rtx (x))
4081 return outer == SET ? 2 : -1;
4082 else if ((outer == COMPARE || outer == PLUS)
4083 && neg_const_double_rtx_ok_for_fpa (x))
4084 return -1;
4085 return 7;
4087 default:
4088 return 99;
4092 /* RTX costs when optimizing for size. */
4093 static bool
4094 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4096 enum machine_mode mode = GET_MODE (x);
4098 if (TARGET_THUMB)
4100 /* XXX TBD. For now, use the standard costs. */
4101 *total = thumb_rtx_costs (x, code, outer_code);
4102 return true;
4105 switch (code)
4107 case MEM:
4108 /* A memory access costs one insn if the mode is small or the address
4109 is a single register; otherwise it costs one insn per word. */
4110 if (REG_P (XEXP (x, 0)))
4111 *total = COSTS_N_INSNS (1);
4112 else
4113 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4114 return true;
4116 case DIV:
4117 case MOD:
4118 case UDIV:
4119 case UMOD:
4120 /* Needs a libcall, so it costs about this. */
4121 *total = COSTS_N_INSNS (2);
4122 return false;
4124 case ROTATE:
4125 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4127 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4128 return true;
4130 /* Fall through */
4131 case ROTATERT:
4132 case ASHIFT:
4133 case LSHIFTRT:
4134 case ASHIFTRT:
4135 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4137 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4138 return true;
4140 else if (mode == SImode)
4142 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4143 /* Slightly disparage register shifts, but not by much. */
4144 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4145 *total += 1 + rtx_cost (XEXP (x, 1), code);
4146 return true;
4149 /* Needs a libcall. */
4150 *total = COSTS_N_INSNS (2);
4151 return false;
4153 case MINUS:
4154 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4156 *total = COSTS_N_INSNS (1);
4157 return false;
4160 if (mode == SImode)
4162 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4163 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4165 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4166 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4167 || subcode1 == ROTATE || subcode1 == ROTATERT
4168 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4169 || subcode1 == ASHIFTRT)
4171 /* It's just the cost of the two operands. */
4172 *total = 0;
4173 return false;
4176 *total = COSTS_N_INSNS (1);
4177 return false;
4180 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4181 return false;
4183 case PLUS:
4184 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4186 *total = COSTS_N_INSNS (1);
4187 return false;
4190 /* Fall through */
4191 case AND: case XOR: case IOR:
4192 if (mode == SImode)
4194 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4196 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4197 || subcode == LSHIFTRT || subcode == ASHIFTRT
4198 || (code == AND && subcode == NOT))
4200 /* It's just the cost of the two operands. */
4201 *total = 0;
4202 return false;
4206 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4207 return false;
4209 case MULT:
4210 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4211 return false;
4213 case NEG:
4214 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4215 *total = COSTS_N_INSNS (1);
4216 /* Fall through */
4217 case NOT:
4218 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4220 return false;
4222 case IF_THEN_ELSE:
4223 *total = 0;
4224 return false;
4226 case COMPARE:
4227 if (cc_register (XEXP (x, 0), VOIDmode))
4228 *total = 0;
4229 else
4230 *total = COSTS_N_INSNS (1);
4231 return false;
4233 case ABS:
4234 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4235 *total = COSTS_N_INSNS (1);
4236 else
4237 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4238 return false;
4240 case SIGN_EXTEND:
4241 *total = 0;
4242 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4244 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4245 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4247 if (mode == DImode)
4248 *total += COSTS_N_INSNS (1);
4249 return false;
4251 case ZERO_EXTEND:
4252 *total = 0;
4253 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4255 switch (GET_MODE (XEXP (x, 0)))
4257 case QImode:
4258 *total += COSTS_N_INSNS (1);
4259 break;
4261 case HImode:
4262 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4263 break;
4264 case SImode:
4265 break;
4267 default:
4268 *total += COSTS_N_INSNS (2);
4272 if (mode == DImode)
4273 *total += COSTS_N_INSNS (1);
4275 return false;
4277 case CONST_INT:
4278 if (const_ok_for_arm (INTVAL (x)))
4279 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4280 else if (const_ok_for_arm (~INTVAL (x)))
4281 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4282 else if (const_ok_for_arm (-INTVAL (x)))
4284 if (outer_code == COMPARE || outer_code == PLUS
4285 || outer_code == MINUS)
4286 *total = 0;
4287 else
4288 *total = COSTS_N_INSNS (1);
4290 else
4291 *total = COSTS_N_INSNS (2);
4292 return true;
4294 case CONST:
4295 case LABEL_REF:
4296 case SYMBOL_REF:
4297 *total = COSTS_N_INSNS (2);
4298 return true;
4300 case CONST_DOUBLE:
4301 *total = COSTS_N_INSNS (4);
4302 return true;
4304 default:
4305 if (mode != VOIDmode)
4306 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4307 else
4308 *total = COSTS_N_INSNS (4); /* Who knows? */
4309 return false;
4313 /* RTX costs for cores with a slow MUL implementation. */
4315 static bool
4316 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4318 enum machine_mode mode = GET_MODE (x);
4320 if (TARGET_THUMB)
4322 *total = thumb_rtx_costs (x, code, outer_code);
4323 return true;
4326 switch (code)
4328 case MULT:
4329 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4330 || mode == DImode)
4332 *total = 30;
4333 return true;
4336 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4338 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4339 & (unsigned HOST_WIDE_INT) 0xffffffff);
4340 int cost, const_ok = const_ok_for_arm (i);
4341 int j, booth_unit_size;
4343 /* Tune as appropriate. */
4344 cost = const_ok ? 4 : 8;
4345 booth_unit_size = 2;
4346 for (j = 0; i && j < 32; j += booth_unit_size)
4348 i >>= booth_unit_size;
4349 cost += 2;
4352 *total = cost;
4353 return true;
4356 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4357 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4358 return true;
4360 default:
4361 *total = arm_rtx_costs_1 (x, code, outer_code);
4362 return true;
4367 /* RTX cost for cores with a fast multiply unit (M variants). */
4369 static bool
4370 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4372 enum machine_mode mode = GET_MODE (x);
4374 if (TARGET_THUMB)
4376 *total = thumb_rtx_costs (x, code, outer_code);
4377 return true;
4380 switch (code)
4382 case MULT:
4383 /* There is no point basing this on the tuning, since it is always the
4384 fast variant if it exists at all. */
4385 if (mode == DImode
4386 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4387 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4388 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4390 *total = 8;
4391 return true;
4395 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4396 || mode == DImode)
4398 *total = 30;
4399 return true;
4402 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4404 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4405 & (unsigned HOST_WIDE_INT) 0xffffffff);
4406 int cost, const_ok = const_ok_for_arm (i);
4407 int j, booth_unit_size;
4409 /* Tune as appropriate. */
4410 cost = const_ok ? 4 : 8;
4411 booth_unit_size = 8;
4412 for (j = 0; i && j < 32; j += booth_unit_size)
4414 i >>= booth_unit_size;
4415 cost += 2;
4418 *total = cost;
4419 return true;
4422 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4423 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4424 return true;
4426 default:
4427 *total = arm_rtx_costs_1 (x, code, outer_code);
4428 return true;
4433 /* RTX cost for XScale CPUs. */
4435 static bool
4436 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4438 enum machine_mode mode = GET_MODE (x);
4440 if (TARGET_THUMB)
4442 *total = thumb_rtx_costs (x, code, outer_code);
4443 return true;
4446 switch (code)
4448 case MULT:
4449 /* There is no point basing this on the tuning, since it is always the
4450 fast variant if it exists at all. */
4451 if (mode == DImode
4452 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4453 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4454 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4456 *total = 8;
4457 return true;
4461 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4462 || mode == DImode)
4464 *total = 30;
4465 return true;
4468 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4470 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4471 & (unsigned HOST_WIDE_INT) 0xffffffff);
4472 int cost, const_ok = const_ok_for_arm (i);
4473 unsigned HOST_WIDE_INT masked_const;
4475 /* The cost will be related to two insns:
4476 first a load of the constant (MOV or LDR), then a multiply. */
4477 cost = 2;
4478 if (! const_ok)
4479 cost += 1; /* LDR is probably more expensive because
4480 of longer result latency. */
4481 masked_const = i & 0xffff8000;
4482 if (masked_const != 0 && masked_const != 0xffff8000)
4484 masked_const = i & 0xf8000000;
4485 if (masked_const == 0 || masked_const == 0xf8000000)
4486 cost += 1;
4487 else
4488 cost += 2;
4490 *total = cost;
4491 return true;
4494 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4495 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4496 return true;
4498 case COMPARE:
4499 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4500 will stall until the multiplication is complete. */
4501 if (GET_CODE (XEXP (x, 0)) == MULT)
4502 *total = 4 + rtx_cost (XEXP (x, 0), code);
4503 else
4504 *total = arm_rtx_costs_1 (x, code, outer_code);
4505 return true;
4507 default:
4508 *total = arm_rtx_costs_1 (x, code, outer_code);
4509 return true;
4514 /* RTX costs for 9e (and later) cores. */
4516 static bool
4517 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4519 enum machine_mode mode = GET_MODE (x);
4520 int nonreg_cost;
4521 int cost;
4523 if (TARGET_THUMB)
4525 switch (code)
4527 case MULT:
4528 *total = COSTS_N_INSNS (3);
4529 return true;
4531 default:
4532 *total = thumb_rtx_costs (x, code, outer_code);
4533 return true;
4537 switch (code)
4539 case MULT:
4540 /* There is no point basing this on the tuning, since it is always the
4541 fast variant if it exists at all. */
4542 if (mode == DImode
4543 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4544 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4545 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4547 *total = 3;
4548 return true;
4552 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4554 *total = 30;
4555 return true;
4557 if (mode == DImode)
4559 cost = 7;
4560 nonreg_cost = 8;
4562 else
4564 cost = 2;
4565 nonreg_cost = 4;
4569 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4570 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4571 return true;
4573 default:
4574 *total = arm_rtx_costs_1 (x, code, outer_code);
4575 return true;
4578 /* All address computations that can be done are free, but rtx cost returns
4579 the same for practically all of them. So we weight the different types
4580 of address here in the order (most preferred first):
4581 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4582 static inline int
4583 arm_arm_address_cost (rtx x)
4585 enum rtx_code c = GET_CODE (x);
4587 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4588 return 0;
4589 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4590 return 10;
4592 if (c == PLUS || c == MINUS)
4594 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4595 return 2;
4597 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4598 return 3;
4600 return 4;
4603 return 6;
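/* Some concrete values of the weighting above (illustrative):
   [r3], #4 (POST_INC) scores 0; [r3, r4, lsl #2] (a shifted sum)
   scores 3; [r3, #8] (an integer sum) scores 4; a bare register
   scores 6; and a literal-pool reference scores 10.  */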
4606 static inline int
4607 arm_thumb_address_cost (rtx x)
4609 enum rtx_code c = GET_CODE (x);
4611 if (c == REG)
4612 return 1;
4613 if (c == PLUS
4614 && GET_CODE (XEXP (x, 0)) == REG
4615 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4616 return 1;
4618 return 2;
4621 static int
4622 arm_address_cost (rtx x)
4624 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4627 static int
4628 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4630 rtx i_pat, d_pat;
4632 /* Some true dependencies can have a higher cost depending
4633 on precisely how certain input operands are used. */
4634 if (arm_tune_xscale
4635 && REG_NOTE_KIND (link) == 0
4636 && recog_memoized (insn) >= 0
4637 && recog_memoized (dep) >= 0)
4639 int shift_opnum = get_attr_shift (insn);
4640 enum attr_type attr_type = get_attr_type (dep);
4642 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4643 operand for INSN. If we have a shifted input operand and the
4644 instruction we depend on is another ALU instruction, then we may
4645 have to account for an additional stall. */
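/* A hypothetical example of the case handled below: in

	add	r1, r2, r3, lsl #4
	mov	r4, r1, asl #2

   the mov's shifted input r1 is written by the preceding alu-shift
   add, so we return a cost of 2 to model the extra stall cycle on
   XScale.  */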
4646 if (shift_opnum != 0
4647 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4649 rtx shifted_operand;
4650 int opno;
4652 /* Get the shifted operand. */
4653 extract_insn (insn);
4654 shifted_operand = recog_data.operand[shift_opnum];
4656 /* Iterate over all the operands in DEP. If we write an operand
4657 that overlaps with SHIFTED_OPERAND, then we have to increase the
4658 cost of this dependency. */
4659 extract_insn (dep);
4660 preprocess_constraints ();
4661 for (opno = 0; opno < recog_data.n_operands; opno++)
4663 /* We can ignore strict inputs. */
4664 if (recog_data.operand_type[opno] == OP_IN)
4665 continue;
4667 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4668 shifted_operand))
4669 return 2;
4674 /* XXX This is not strictly true for the FPA. */
4675 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4676 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4677 return 0;
4679 /* Call insns don't incur a stall, even if they follow a load. */
4680 if (REG_NOTE_KIND (link) == 0
4681 && GET_CODE (insn) == CALL_INSN)
4682 return 1;
4684 if ((i_pat = single_set (insn)) != NULL
4685 && GET_CODE (SET_SRC (i_pat)) == MEM
4686 && (d_pat = single_set (dep)) != NULL
4687 && GET_CODE (SET_DEST (d_pat)) == MEM)
4689 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4690 /* This is a load after a store; there is no conflict if the load reads
4691 from a cached area. Assume that loads from the stack and from the
4692 constant pool are cached, and that others will miss. This is a
4693 hack. */
4695 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4696 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4697 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4698 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4699 return 1;
4702 return cost;
4705 static int fp_consts_inited = 0;
4707 /* Only zero is valid for VFP; the other values are additionally valid for FPA. */
4708 static const char * const strings_fp[8] =
4710 "0", "1", "2", "3",
4711 "4", "5", "0.5", "10"
4714 static REAL_VALUE_TYPE values_fp[8];
4716 static void
4717 init_fp_table (void)
4719 int i;
4720 REAL_VALUE_TYPE r;
4722 if (TARGET_VFP)
4723 fp_consts_inited = 1;
4724 else
4725 fp_consts_inited = 8;
4727 for (i = 0; i < fp_consts_inited; i++)
4729 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4730 values_fp[i] = r;
4734 /* Return TRUE if rtx X is a valid immediate FP constant. */
4736 arm_const_double_rtx (rtx x)
4738 REAL_VALUE_TYPE r;
4739 int i;
4741 if (!fp_consts_inited)
4742 init_fp_table ();
4744 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4745 if (REAL_VALUE_MINUS_ZERO (r))
4746 return 0;
4748 for (i = 0; i < fp_consts_inited; i++)
4749 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4750 return 1;
4752 return 0;
4755 /* Return TRUE if the negation of rtx X is a valid immediate FPA constant. */
4757 neg_const_double_rtx_ok_for_fpa (rtx x)
4759 REAL_VALUE_TYPE r;
4760 int i;
4762 if (!fp_consts_inited)
4763 init_fp_table ();
4765 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4766 r = REAL_VALUE_NEGATE (r);
4767 if (REAL_VALUE_MINUS_ZERO (r))
4768 return 0;
4770 for (i = 0; i < 8; i++)
4771 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4772 return 1;
4774 return 0;
4777 /* Predicates for `match_operand' and `match_operator'. */
4779 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4781 cirrus_memory_offset (rtx op)
4783 /* Reject eliminable registers. */
4784 if (! (reload_in_progress || reload_completed)
4785 && ( reg_mentioned_p (frame_pointer_rtx, op)
4786 || reg_mentioned_p (arg_pointer_rtx, op)
4787 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4788 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4789 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4790 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4791 return 0;
4793 if (GET_CODE (op) == MEM)
4795 rtx ind;
4797 ind = XEXP (op, 0);
4799 /* Match: (mem (reg)). */
4800 if (GET_CODE (ind) == REG)
4801 return 1;
4803 /* Match:
4804 (mem (plus (reg)
4805 (const))). */
4806 if (GET_CODE (ind) == PLUS
4807 && GET_CODE (XEXP (ind, 0)) == REG
4808 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4809 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4810 return 1;
4813 return 0;
4816 /* Return TRUE if OP is a valid VFP memory address pattern.
4817 WB is true if writeback address modes are allowed. */
4820 arm_coproc_mem_operand (rtx op, bool wb)
4822 rtx ind;
4824 /* Reject eliminable registers. */
4825 if (! (reload_in_progress || reload_completed)
4826 && ( reg_mentioned_p (frame_pointer_rtx, op)
4827 || reg_mentioned_p (arg_pointer_rtx, op)
4828 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4829 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4830 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4831 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4832 return FALSE;
4834 /* Constants are converted into offsets from labels. */
4835 if (GET_CODE (op) != MEM)
4836 return FALSE;
4838 ind = XEXP (op, 0);
4840 if (reload_completed
4841 && (GET_CODE (ind) == LABEL_REF
4842 || (GET_CODE (ind) == CONST
4843 && GET_CODE (XEXP (ind, 0)) == PLUS
4844 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4845 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4846 return TRUE;
4848 /* Match: (mem (reg)). */
4849 if (GET_CODE (ind) == REG)
4850 return arm_address_register_rtx_p (ind, 0);
4852 /* Autoincrement addressing modes. */
4853 if (wb
4854 && (GET_CODE (ind) == PRE_INC
4855 || GET_CODE (ind) == POST_INC
4856 || GET_CODE (ind) == PRE_DEC
4857 || GET_CODE (ind) == POST_DEC))
4858 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4860 if (wb
4861 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4862 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4863 && GET_CODE (XEXP (ind, 1)) == PLUS
4864 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4865 ind = XEXP (ind, 1);
4867 /* Match:
4868 (plus (reg)
4869 (const)). */
4870 if (GET_CODE (ind) == PLUS
4871 && GET_CODE (XEXP (ind, 0)) == REG
4872 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4873 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4874 && INTVAL (XEXP (ind, 1)) > -1024
4875 && INTVAL (XEXP (ind, 1)) < 1024
4876 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4877 return TRUE;
4879 return FALSE;
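/* Illustrative examples of the forms accepted above: (mem (reg r4))
   and (mem (plus (reg r4) (const_int 8))) are valid, while offsets
   such as 6 (not a multiple of 4) or 1024 (out of range) are
   rejected.  */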
4882 /* Return true if X is a register that will be eliminated later on. */
4884 arm_eliminable_register (rtx x)
4886 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4887 || REGNO (x) == ARG_POINTER_REGNUM
4888 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4889 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4892 /* Return GENERAL_REGS if a scratch register is required to reload x
4893 to/from VFP registers; otherwise return NO_REGS. */
4895 enum reg_class
4896 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4898 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4899 return NO_REGS;
4901 return GENERAL_REGS;
4905 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4906 Used by the Cirrus Maverick code, which has to work around
4907 a hardware bug triggered by such instructions. */
4908 static bool
4909 arm_memory_load_p (rtx insn)
4911 rtx body, lhs, rhs;
4913 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4914 return false;
4916 body = PATTERN (insn);
4918 if (GET_CODE (body) != SET)
4919 return false;
4921 lhs = XEXP (body, 0);
4922 rhs = XEXP (body, 1);
4924 lhs = REG_OR_SUBREG_RTX (lhs);
4926 /* If the destination is not a general purpose
4927 register we do not have to worry. */
4928 if (GET_CODE (lhs) != REG
4929 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4930 return false;
4932 /* As well as loads from memory, we also have to react
4933 to loads of invalid constants, which will be turned
4934 into loads from the minipool. */
4935 return (GET_CODE (rhs) == MEM
4936 || GET_CODE (rhs) == SYMBOL_REF
4937 || note_invalid_constants (insn, -1, false));
4940 /* Return TRUE if INSN is a Cirrus instruction. */
4941 static bool
4942 arm_cirrus_insn_p (rtx insn)
4944 enum attr_cirrus attr;
4946 /* get_attr aborts on USE and CLOBBER. */
4947 if (!insn
4948 || GET_CODE (insn) != INSN
4949 || GET_CODE (PATTERN (insn)) == USE
4950 || GET_CODE (PATTERN (insn)) == CLOBBER)
4951 return 0;
4953 attr = get_attr_cirrus (insn);
4955 return attr != CIRRUS_NOT;
4958 /* Cirrus reorg for invalid instruction combinations. */
4959 static void
4960 cirrus_reorg (rtx first)
4962 enum attr_cirrus attr;
4963 rtx body = PATTERN (first);
4964 rtx t;
4965 int nops;
4967 /* Any branch must be followed by 2 non-Cirrus instructions. */
4968 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4970 nops = 0;
4971 t = next_nonnote_insn (first);
4973 if (arm_cirrus_insn_p (t))
4974 ++ nops;
4976 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4977 ++ nops;
4979 while (nops --)
4980 emit_insn_after (gen_nop (), first);
4982 return;
4985 /* (float (blah)) is in parallel with a clobber. */
4986 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4987 body = XVECEXP (body, 0, 0);
4989 if (GET_CODE (body) == SET)
4991 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4993 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4994 be followed by a non-Cirrus insn. */
4995 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4997 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4998 emit_insn_after (gen_nop (), first);
5000 return;
5002 else if (arm_memory_load_p (first))
5004 unsigned int arm_regno;
5006 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5007 ldr/cfmv64hr combination where the Rd field is the same
5008 in both instructions must be split with a non-Cirrus
5009 insn. Example:
5011 ldr r0, blah
5013 cfmvsr mvf0, r0. */
5015 /* Get Arm register number for ldr insn. */
5016 if (GET_CODE (lhs) == REG)
5017 arm_regno = REGNO (lhs);
5018 else if (GET_CODE (rhs) == REG)
5019 arm_regno = REGNO (rhs);
5020 else
5021 abort ();
5023 /* Next insn. */
5024 first = next_nonnote_insn (first);
5026 if (! arm_cirrus_insn_p (first))
5027 return;
5029 body = PATTERN (first);
5031 /* (float (blah)) is in parallel with a clobber. */
5032 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5033 body = XVECEXP (body, 0, 0);
5035 if (GET_CODE (body) == FLOAT)
5036 body = XEXP (body, 0);
5038 if (get_attr_cirrus (first) == CIRRUS_MOVE
5039 && GET_CODE (XEXP (body, 1)) == REG
5040 && arm_regno == REGNO (XEXP (body, 1)))
5041 emit_insn_after (gen_nop (), first);
5043 return;
5047 /* get_attr aborts on USE and CLOBBER. */
5048 if (!first
5049 || GET_CODE (first) != INSN
5050 || GET_CODE (PATTERN (first)) == USE
5051 || GET_CODE (PATTERN (first)) == CLOBBER)
5052 return;
5054 attr = get_attr_cirrus (first);
5056 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5057 must be followed by a non-coprocessor instruction. */
5058 if (attr == CIRRUS_COMPARE)
5060 nops = 0;
5062 t = next_nonnote_insn (first);
5064 if (arm_cirrus_insn_p (t))
5065 ++ nops;
5067 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5068 ++ nops;
5070 while (nops --)
5071 emit_insn_after (gen_nop (), first);
5073 return;
5077 /* Return TRUE if X references a SYMBOL_REF. */
5079 symbol_mentioned_p (rtx x)
5081 const char * fmt;
5082 int i;
5084 if (GET_CODE (x) == SYMBOL_REF)
5085 return 1;
5087 fmt = GET_RTX_FORMAT (GET_CODE (x));
5089 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5091 if (fmt[i] == 'E')
5093 int j;
5095 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5096 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5097 return 1;
5099 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5100 return 1;
5103 return 0;
5106 /* Return TRUE if X references a LABEL_REF. */
5108 label_mentioned_p (rtx x)
5110 const char * fmt;
5111 int i;
5113 if (GET_CODE (x) == LABEL_REF)
5114 return 1;
5116 fmt = GET_RTX_FORMAT (GET_CODE (x));
5117 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5119 if (fmt[i] == 'E')
5121 int j;
5123 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5124 if (label_mentioned_p (XVECEXP (x, i, j)))
5125 return 1;
5127 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5128 return 1;
5131 return 0;
5134 enum rtx_code
5135 minmax_code (rtx x)
5137 enum rtx_code code = GET_CODE (x);
5139 if (code == SMAX)
5140 return GE;
5141 else if (code == SMIN)
5142 return LE;
5143 else if (code == UMIN)
5144 return LEU;
5145 else if (code == UMAX)
5146 return GEU;
5148 abort ();
5151 /* Return 1 if memory locations are adjacent. */
5153 adjacent_mem_locations (rtx a, rtx b)
5155 /* We don't guarantee to preserve the order of these memory refs. */
5156 if (volatile_refs_p (a) || volatile_refs_p (b))
5157 return 0;
5159 if ((GET_CODE (XEXP (a, 0)) == REG
5160 || (GET_CODE (XEXP (a, 0)) == PLUS
5161 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5162 && (GET_CODE (XEXP (b, 0)) == REG
5163 || (GET_CODE (XEXP (b, 0)) == PLUS
5164 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5166 HOST_WIDE_INT val0 = 0, val1 = 0;
5167 rtx reg0, reg1;
5168 int val_diff;
5170 if (GET_CODE (XEXP (a, 0)) == PLUS)
5172 reg0 = XEXP (XEXP (a, 0), 0);
5173 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5175 else
5176 reg0 = XEXP (a, 0);
5178 if (GET_CODE (XEXP (b, 0)) == PLUS)
5180 reg1 = XEXP (XEXP (b, 0), 0);
5181 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5183 else
5184 reg1 = XEXP (b, 0);
5186 /* Don't accept any offset that will require multiple
5187 instructions to handle, since this would cause the
5188 arith_adjacentmem pattern to output an overlong sequence. */
5189 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5190 return 0;
5192 /* Don't allow an eliminable register: register elimination can make
5193 the offset too large. */
5194 if (arm_eliminable_register (reg0))
5195 return 0;
5197 val_diff = val1 - val0;
5199 if (arm_ld_sched)
5201 /* If the target has load delay slots, then there's no benefit
5202 to using an ldm instruction unless the offset is zero and
5203 we are optimizing for size. */
5204 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
5205 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
5206 && (val_diff == 4 || val_diff == -4));
5209 return ((REGNO (reg0) == REGNO (reg1))
5210 && (val_diff == 4 || val_diff == -4));
5213 return 0;
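/* Example (illustrative): (mem (plus (reg r4) (const_int 8))) and
   (mem (plus (reg r4) (const_int 12))) are adjacent in the sense
   used here; the same pair based on different registers, or spaced
   8 bytes apart, is not.  */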
5217 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5218 HOST_WIDE_INT *load_offset)
5220 int unsorted_regs[4];
5221 HOST_WIDE_INT unsorted_offsets[4];
5222 int order[4];
5223 int base_reg = -1;
5224 int i;
5226 /* Can only handle 2, 3, or 4 insns at present,
5227 though could be easily extended if required. */
5228 if (nops < 2 || nops > 4)
5229 abort ();
5231 /* Loop over the operands and check that the memory references are
5232 suitable (i.e. immediate offsets from the same base register). At
5233 the same time, extract the target register, and the memory
5234 offsets. */
5235 for (i = 0; i < nops; i++)
5237 rtx reg;
5238 rtx offset;
5240 /* Convert a subreg of a mem into the mem itself. */
5241 if (GET_CODE (operands[nops + i]) == SUBREG)
5242 operands[nops + i] = alter_subreg (operands + (nops + i));
5244 if (GET_CODE (operands[nops + i]) != MEM)
5245 abort ();
5247 /* Don't reorder volatile memory references; it doesn't seem worth
5248 looking for the case where the order is ok anyway. */
5249 if (MEM_VOLATILE_P (operands[nops + i]))
5250 return 0;
5252 offset = const0_rtx;
5254 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5255 || (GET_CODE (reg) == SUBREG
5256 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5257 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5258 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5259 == REG)
5260 || (GET_CODE (reg) == SUBREG
5261 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5262 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5263 == CONST_INT)))
5265 if (i == 0)
5267 base_reg = REGNO (reg);
5268 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5269 ? REGNO (operands[i])
5270 : REGNO (SUBREG_REG (operands[i])));
5271 order[0] = 0;
5273 else
5275 if (base_reg != (int) REGNO (reg))
5276 /* Not addressed from the same base register. */
5277 return 0;
5279 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5280 ? REGNO (operands[i])
5281 : REGNO (SUBREG_REG (operands[i])));
5282 if (unsorted_regs[i] < unsorted_regs[order[0]])
5283 order[0] = i;
5286 /* If it isn't an integer register, or if it overwrites the
5287 base register but isn't the last insn in the list, then
5288 we can't do this. */
5289 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5290 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5291 return 0;
5293 unsorted_offsets[i] = INTVAL (offset);
5295 else
5296 /* Not a suitable memory address. */
5297 return 0;
5300 /* All the useful information has now been extracted from the
5301 operands into unsorted_regs and unsorted_offsets; additionally,
5302 order[0] has been set to the lowest numbered register in the
5303 list. Sort the registers into order, and check that the memory
5304 offsets are ascending and adjacent. */
5306 for (i = 1; i < nops; i++)
5308 int j;
5310 order[i] = order[i - 1];
5311 for (j = 0; j < nops; j++)
5312 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5313 && (order[i] == order[i - 1]
5314 || unsorted_regs[j] < unsorted_regs[order[i]]))
5315 order[i] = j;
5317 /* Have we found a suitable register? If not, one must be used more
5318 than once. */
5319 if (order[i] == order[i - 1])
5320 return 0;
5322 /* Is the memory address adjacent and ascending? */
5323 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5324 return 0;
5327 if (base)
5329 *base = base_reg;
5331 for (i = 0; i < nops; i++)
5332 regs[i] = unsorted_regs[order[i]];
5334 *load_offset = unsorted_offsets[order[0]];
5337 if (unsorted_offsets[order[0]] == 0)
5338 return 1; /* ldmia */
5340 if (unsorted_offsets[order[0]] == 4)
5341 return 2; /* ldmib */
5343 if (unsorted_offsets[order[nops - 1]] == 0)
5344 return 3; /* ldmda */
5346 if (unsorted_offsets[order[nops - 1]] == -4)
5347 return 4; /* ldmdb */
5349 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5350 if the offset isn't small enough. The reason 2 ldrs are faster
5351 is because these ARMs are able to do more than one cache access
5352 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5353 whilst the ARM8 has a double bandwidth cache. This means that
5354 these cores can do both an instruction fetch and a data fetch in
5355 a single cycle, so the trick of calculating the address into a
5356 scratch register (one of the result regs) and then doing a load
5357 multiple actually becomes slower (and no smaller in code size).
5358 That is the transformation
5360 ldr rd1, [rbase + offset]
5361 ldr rd2, [rbase + offset + 4]
5363 to
5365 add rd1, rbase, offset
5366 ldmia rd1, {rd1, rd2}
5368 produces worse code -- '3 cycles + any stalls on rd2' instead of
5369 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5370 access per cycle, the first sequence could never complete in less
5371 than 6 cycles, whereas the ldm sequence would only take 5 and
5372 would make better use of sequential accesses if not hitting the
5373 cache.
5375 We cheat here and test 'arm_ld_sched' which we currently know to
5376 only be true for the ARM8, ARM9 and StrongARM. If this ever
5377 changes, then the test below needs to be reworked. */
5378 if (nops == 2 && arm_ld_sched)
5379 return 0;
5381 /* Can't do it without setting up the offset, only do this if it takes
5382 no more than one insn. */
5383 return (const_ok_for_arm (unsorted_offsets[order[0]])
5384 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
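/* A sketch of the return values with hypothetical operands: loading
   {r0, r1} from [r4, #0] and [r4, #4] yields 1 (ldmia r4, {r0, r1});
   from [r4, #4] and [r4, #8] it yields 2 (ldmib); and a larger
   common offset yields 5, requesting an add/sub into regs[0]
   followed by an ldmia, provided that offset is itself a one-insn
   constant.  */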
5387 const char *
5388 emit_ldm_seq (rtx *operands, int nops)
5390 int regs[4];
5391 int base_reg;
5392 HOST_WIDE_INT offset;
5393 char buf[100];
5394 int i;
5396 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5398 case 1:
5399 strcpy (buf, "ldm%?ia\t");
5400 break;
5402 case 2:
5403 strcpy (buf, "ldm%?ib\t");
5404 break;
5406 case 3:
5407 strcpy (buf, "ldm%?da\t");
5408 break;
5410 case 4:
5411 strcpy (buf, "ldm%?db\t");
5412 break;
5414 case 5:
5415 if (offset >= 0)
5416 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5417 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5418 (long) offset);
5419 else
5420 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5421 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5422 (long) -offset);
5423 output_asm_insn (buf, operands);
5424 base_reg = regs[0];
5425 strcpy (buf, "ldm%?ia\t");
5426 break;
5428 default:
5429 abort ();
5432 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5433 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5435 for (i = 1; i < nops; i++)
5436 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5437 reg_names[regs[i]]);
5439 strcat (buf, "}\t%@ phole ldm");
5441 output_asm_insn (buf, operands);
5442 return "";
5446 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5447 HOST_WIDE_INT * load_offset)
5449 int unsorted_regs[4];
5450 HOST_WIDE_INT unsorted_offsets[4];
5451 int order[4];
5452 int base_reg = -1;
5453 int i;
5455 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5456 extended if required. */
5457 if (nops < 2 || nops > 4)
5458 abort ();
5460 /* Loop over the operands and check that the memory references are
5461 suitable (i.e. immediate offsets from the same base register). At
5462 the same time, extract the target register, and the memory
5463 offsets. */
5464 for (i = 0; i < nops; i++)
5466 rtx reg;
5467 rtx offset;
5469 /* Convert a subreg of a mem into the mem itself. */
5470 if (GET_CODE (operands[nops + i]) == SUBREG)
5471 operands[nops + i] = alter_subreg (operands + (nops + i));
5473 if (GET_CODE (operands[nops + i]) != MEM)
5474 abort ();
5476 /* Don't reorder volatile memory references; it doesn't seem worth
5477 looking for the case where the order is ok anyway. */
5478 if (MEM_VOLATILE_P (operands[nops + i]))
5479 return 0;
5481 offset = const0_rtx;
5483 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5484 || (GET_CODE (reg) == SUBREG
5485 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5486 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5487 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5488 == REG)
5489 || (GET_CODE (reg) == SUBREG
5490 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5491 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5492 == CONST_INT)))
5494 if (i == 0)
5496 base_reg = REGNO (reg);
5497 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5498 ? REGNO (operands[i])
5499 : REGNO (SUBREG_REG (operands[i])));
5500 order[0] = 0;
5502 else
5504 if (base_reg != (int) REGNO (reg))
5505 /* Not addressed from the same base register. */
5506 return 0;
5508 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5509 ? REGNO (operands[i])
5510 : REGNO (SUBREG_REG (operands[i])));
5511 if (unsorted_regs[i] < unsorted_regs[order[0]])
5512 order[0] = i;
5515 /* If it isn't an integer register, then we can't do this. */
5516 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5517 return 0;
5519 unsorted_offsets[i] = INTVAL (offset);
5521 else
5522 /* Not a suitable memory address. */
5523 return 0;
5526 /* All the useful information has now been extracted from the
5527 operands into unsorted_regs and unsorted_offsets; additionally,
5528 order[0] has been set to the lowest numbered register in the
5529 list. Sort the registers into order, and check that the memory
5530 offsets are ascending and adjacent. */
5532 for (i = 1; i < nops; i++)
5534 int j;
5536 order[i] = order[i - 1];
5537 for (j = 0; j < nops; j++)
5538 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5539 && (order[i] == order[i - 1]
5540 || unsorted_regs[j] < unsorted_regs[order[i]]))
5541 order[i] = j;
5543 /* Have we found a suitable register? If not, one must be used more
5544 than once. */
5545 if (order[i] == order[i - 1])
5546 return 0;
5548 /* Is the memory address adjacent and ascending? */
5549 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5550 return 0;
5553 if (base)
5555 *base = base_reg;
5557 for (i = 0; i < nops; i++)
5558 regs[i] = unsorted_regs[order[i]];
5560 *load_offset = unsorted_offsets[order[0]];
5563 if (unsorted_offsets[order[0]] == 0)
5564 return 1; /* stmia */
5566 if (unsorted_offsets[order[0]] == 4)
5567 return 2; /* stmib */
5569 if (unsorted_offsets[order[nops - 1]] == 0)
5570 return 3; /* stmda */
5572 if (unsorted_offsets[order[nops - 1]] == -4)
5573 return 4; /* stmdb */
5575 return 0;
5578 const char *
5579 emit_stm_seq (rtx *operands, int nops)
5581 int regs[4];
5582 int base_reg;
5583 HOST_WIDE_INT offset;
5584 char buf[100];
5585 int i;
5587 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5589 case 1:
5590 strcpy (buf, "stm%?ia\t");
5591 break;
5593 case 2:
5594 strcpy (buf, "stm%?ib\t");
5595 break;
5597 case 3:
5598 strcpy (buf, "stm%?da\t");
5599 break;
5601 case 4:
5602 strcpy (buf, "stm%?db\t");
5603 break;
5605 default:
5606 abort ();
5609 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5610 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5612 for (i = 1; i < nops; i++)
5613 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5614 reg_names[regs[i]]);
5616 strcat (buf, "}\t%@ phole stm");
5618 output_asm_insn (buf, operands);
5619 return "";
5623 /* Routines for use in generating RTL. */
5626 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5627 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5629 HOST_WIDE_INT offset = *offsetp;
5630 int i = 0, j;
5631 rtx result;
5632 int sign = up ? 1 : -1;
5633 rtx mem, addr;
5635 /* XScale has load-store double instructions, but they have stricter
5636 alignment requirements than load-store multiple, so we cannot
5637 use them.
5639 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5640 the pipeline until completion.
5642 NREGS CYCLES
5643 1 3
5644 2 4
5645 3 5
5646 4 6
5648 An ldr instruction takes 1-3 cycles, but does not block the
5649 pipeline.
5651 NREGS CYCLES
5652 1 1-3
5653 2 2-6
5654 3 3-9
5655 4 4-12
5657 Best case ldr will always win. However, the more ldr instructions
5658 we issue, the less likely we are to be able to schedule them well.
5659 Using ldr instructions also increases code size.
5661 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5662 for counts of 3 or 4 regs. */
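/* Making the compromise concrete (figures from the tables above):
   for two registers an ldm blocks for 2 + 2 = 4 cycles, while two
   separate ldrs take 2-6 cycles without blocking the pipeline, so
   ldr wins; by three or four registers the extra code size and
   scheduling pressure of individual ldrs tip the balance back to
   ldm.  */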
5663 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5665 rtx seq;
5667 start_sequence ();
5669 for (i = 0; i < count; i++)
5671 addr = plus_constant (from, i * 4 * sign);
5672 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5673 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5674 offset += 4 * sign;
5677 if (write_back)
5679 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5680 *offsetp = offset;
5683 seq = get_insns ();
5684 end_sequence ();
5686 return seq;
5689 result = gen_rtx_PARALLEL (VOIDmode,
5690 rtvec_alloc (count + (write_back ? 1 : 0)));
5691 if (write_back)
5693 XVECEXP (result, 0, 0)
5694 = gen_rtx_SET (GET_MODE (from), from,
5695 plus_constant (from, count * 4 * sign));
5696 i = 1;
5697 count++;
5700 for (j = 0; i < count; i++, j++)
5702 addr = plus_constant (from, j * 4 * sign);
5703 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5704 XVECEXP (result, 0, i)
5705 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5706 offset += 4 * sign;
5709 if (write_back)
5710 *offsetp = offset;
5712 return result;
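/* Schematically (an illustration, not verbatim RTL), base_regno = 4,
   count = 2, up and write_back set produce:

     (parallel [(set (reg from) (plus (reg from) (const_int 8)))
                (set (reg:SI 4) (mem:SI (reg from)))
                (set (reg:SI 5) (mem:SI (plus (reg from) (const_int 4))))])
*/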
5716 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5717 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5719 HOST_WIDE_INT offset = *offsetp;
5720 int i = 0, j;
5721 rtx result;
5722 int sign = up ? 1 : -1;
5723 rtx mem, addr;
5725 /* See arm_gen_load_multiple for discussion of
5726 the pros/cons of ldm/stm usage for XScale. */
5727 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5729 rtx seq;
5731 start_sequence ();
5733 for (i = 0; i < count; i++)
5735 addr = plus_constant (to, i * 4 * sign);
5736 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5737 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5738 offset += 4 * sign;
5741 if (write_back)
5743 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5744 *offsetp = offset;
5747 seq = get_insns ();
5748 end_sequence ();
5750 return seq;
5753 result = gen_rtx_PARALLEL (VOIDmode,
5754 rtvec_alloc (count + (write_back ? 1 : 0)));
5755 if (write_back)
5757 XVECEXP (result, 0, 0)
5758 = gen_rtx_SET (GET_MODE (to), to,
5759 plus_constant (to, count * 4 * sign));
5760 i = 1;
5761 count++;
5764 for (j = 0; i < count; i++, j++)
5766 addr = plus_constant (to, j * 4 * sign);
5767 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5768 XVECEXP (result, 0, i)
5769 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5770 offset += 4 * sign;
5773 if (write_back)
5774 *offsetp = offset;
5776 return result;
5780 arm_gen_movmemqi (rtx *operands)
5782 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5783 HOST_WIDE_INT srcoffset, dstoffset;
5784 int i;
5785 rtx src, dst, srcbase, dstbase;
5786 rtx part_bytes_reg = NULL;
5787 rtx mem;
5789 if (GET_CODE (operands[2]) != CONST_INT
5790 || GET_CODE (operands[3]) != CONST_INT
5791 || INTVAL (operands[2]) > 64
5792 || INTVAL (operands[3]) & 3)
5793 return 0;
5795 dstbase = operands[0];
5796 srcbase = operands[1];
5798 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5799 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5801 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5802 out_words_to_go = INTVAL (operands[2]) / 4;
5803 last_bytes = INTVAL (operands[2]) & 3;
5804 dstoffset = srcoffset = 0;
5806 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5807 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5809 for (i = 0; in_words_to_go >= 2; i += 4)
5811 if (in_words_to_go > 4)
5812 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5813 srcbase, &srcoffset));
5814 else
5815 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5816 FALSE, srcbase, &srcoffset));
5818 if (out_words_to_go)
5820 if (out_words_to_go > 4)
5821 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5822 dstbase, &dstoffset));
5823 else if (out_words_to_go != 1)
5824 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5825 dst, TRUE,
5826 (last_bytes == 0
5827 ? FALSE : TRUE),
5828 dstbase, &dstoffset));
5829 else
5831 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5832 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5833 if (last_bytes != 0)
5835 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5836 dstoffset += 4;
5841 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5842 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5845 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5846 if (out_words_to_go)
5848 rtx sreg;
5850 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5851 sreg = copy_to_reg (mem);
5853 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5854 emit_move_insn (mem, sreg);
5855 in_words_to_go--;
5857 if (in_words_to_go) /* Sanity check */
5858 abort ();
5861 if (in_words_to_go)
5863 if (in_words_to_go < 0)
5864 abort ();
5866 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5867 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5870 if (last_bytes && part_bytes_reg == NULL)
5871 abort ();
5873 if (BYTES_BIG_ENDIAN && last_bytes)
5875 rtx tmp = gen_reg_rtx (SImode);
5877 /* The bytes we want are in the top end of the word. */
5878 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5879 GEN_INT (8 * (4 - last_bytes))));
5880 part_bytes_reg = tmp;
5882 while (last_bytes)
5884 mem = adjust_automodify_address (dstbase, QImode,
5885 plus_constant (dst, last_bytes - 1),
5886 dstoffset + last_bytes - 1);
5887 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5889 if (--last_bytes)
5891 tmp = gen_reg_rtx (SImode);
5892 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5893 part_bytes_reg = tmp;
5898 else
5900 if (last_bytes > 1)
5902 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5903 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5904 last_bytes -= 2;
5905 if (last_bytes)
5907 rtx tmp = gen_reg_rtx (SImode);
5908 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5909 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5910 part_bytes_reg = tmp;
5911 dstoffset += 2;
5915 if (last_bytes)
5917 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5918 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5922 return 1;
5925 /* Generate a memory reference for a half word, such that it will be loaded
5926 into the top 16 bits of the word. We can assume that the address is
5927 known to be alignable and of the form reg, or plus (reg, const). */
5930 arm_gen_rotated_half_load (rtx memref)
5932 HOST_WIDE_INT offset = 0;
5933 rtx base = XEXP (memref, 0);
5935 if (GET_CODE (base) == PLUS)
5937 offset = INTVAL (XEXP (base, 1));
5938 base = XEXP (base, 0);
5941 /* If we aren't allowed to generate unaligned addresses, then fail. */
5942 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5943 return NULL;
5945 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5947 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5948 return base;
5950 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5953 /* Select a dominance comparison mode if possible for a test of the general
5954 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5955 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5956 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5957 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5958 In all cases OP will be either EQ or NE, but we don't need to know which
5959 here. If we are unable to support a dominance comparison we return
5960 CC mode. This will then fail to match for the RTL expressions that
5961 generate this call. */
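/* For example (illustrative): with X = (eq r0 r1), Y = (le r0 r1)
   and COND_OR == DOM_CC_X_OR_Y, EQ dominates LE (EQ true implies LE
   true), so the code below returns CC_DLEmode and a single compare
   can test the whole (eq || le) expression.  */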
5962 enum machine_mode
5963 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5965 enum rtx_code cond1, cond2;
5966 int swapped = 0;
5968 /* Currently we will probably get the wrong result if the individual
5969 comparisons are not simple. This also ensures that it is safe to
5970 reverse a comparison if necessary. */
5971 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5972 != CCmode)
5973 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5974 != CCmode))
5975 return CCmode;
5977 /* The if_then_else variant of this tests the second condition if the
5978 first passes, but is true if the first fails. Reverse the first
5979 condition to get a true "inclusive-or" expression. */
5980 if (cond_or == DOM_CC_NX_OR_Y)
5981 cond1 = reverse_condition (cond1);
5983 /* If the comparisons are not equal, and one doesn't dominate the other,
5984 then we can't do this. */
5985 if (cond1 != cond2
5986 && !comparison_dominates_p (cond1, cond2)
5987 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5988 return CCmode;
5990 if (swapped)
5992 enum rtx_code temp = cond1;
5993 cond1 = cond2;
5994 cond2 = temp;
5997 switch (cond1)
5999 case EQ:
6000 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6001 return CC_DEQmode;
6003 switch (cond2)
6005 case LE: return CC_DLEmode;
6006 case LEU: return CC_DLEUmode;
6007 case GE: return CC_DGEmode;
6008 case GEU: return CC_DGEUmode;
6009 default: break;
6012 break;
6014 case LT:
6015 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6016 return CC_DLTmode;
6017 if (cond2 == LE)
6018 return CC_DLEmode;
6019 if (cond2 == NE)
6020 return CC_DNEmode;
6021 break;
6023 case GT:
6024 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6025 return CC_DGTmode;
6026 if (cond2 == GE)
6027 return CC_DGEmode;
6028 if (cond2 == NE)
6029 return CC_DNEmode;
6030 break;
6032 case LTU:
6033 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6034 return CC_DLTUmode;
6035 if (cond2 == LEU)
6036 return CC_DLEUmode;
6037 if (cond2 == NE)
6038 return CC_DNEmode;
6039 break;
6041 case GTU:
6042 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6043 return CC_DGTUmode;
6044 if (cond2 == GEU)
6045 return CC_DGEUmode;
6046 if (cond2 == NE)
6047 return CC_DNEmode;
6048 break;
6050 /* The remaining cases only occur when both comparisons are the
6051 same. */
6052 case NE:
6053 return CC_DNEmode;
6055 case LE:
6056 return CC_DLEmode;
6058 case GE:
6059 return CC_DGEmode;
6061 case LEU:
6062 return CC_DLEUmode;
6064 case GEU:
6065 return CC_DGEUmode;
6067 default:
6068 break;
6071 abort ();
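/* A worked example: for a DOM_CC_X_OR_Y test combining an EQ with a GE
   of the same compare, cond1 == EQ and cond2 == GE.  EQ dominates GE
   (equality implies greater-or-equal), so no swap takes place and the
   EQ/GE arm of the switch returns CC_DGEmode.  */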
6074 enum machine_mode
6075 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6077 /* All floating point compares return CCFP if it is an equality
6078 comparison, and CCFPE otherwise. */
6079 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6081 switch (op)
6083 case EQ:
6084 case NE:
6085 case UNORDERED:
6086 case ORDERED:
6087 case UNLT:
6088 case UNLE:
6089 case UNGT:
6090 case UNGE:
6091 case UNEQ:
6092 case LTGT:
6093 return CCFPmode;
6095 case LT:
6096 case LE:
6097 case GT:
6098 case GE:
6099 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6100 return CCFPmode;
6101 return CCFPEmode;
6103 default:
6104 abort ();
6108 /* A compare with a shifted operand. Because of canonicalization, the
6109 comparison will have to be swapped when we emit the assembler. */
6110 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6111 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6112 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6113 || GET_CODE (x) == ROTATERT))
6114 return CC_SWPmode;
6116 /* This operation is performed swapped, but since we only rely on the Z
6117 flag we don't need an additional mode. */
6118 if (GET_MODE (y) == SImode && REG_P (y)
6119 && GET_CODE (x) == NEG
6120 && (op == EQ || op == NE))
6121 return CC_Zmode;
6123 /* This is a special case that is used by combine to allow a
6124 comparison of a shifted byte load to be split into a zero-extend
6125 followed by a comparison of the shifted integer (only valid for
6126 equalities and unsigned inequalities). */
6127 if (GET_MODE (x) == SImode
6128 && GET_CODE (x) == ASHIFT
6129 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6130 && GET_CODE (XEXP (x, 0)) == SUBREG
6131 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6132 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6133 && (op == EQ || op == NE
6134 || op == GEU || op == GTU || op == LTU || op == LEU)
6135 && GET_CODE (y) == CONST_INT)
6136 return CC_Zmode;
6138 /* A construct for a conditional compare: if the false arm contains
6139 0, then both conditions must be true; otherwise either condition
6140 must be true. Not all conditions are possible, so CCmode is
6141 returned if it can't be done. */
6142 if (GET_CODE (x) == IF_THEN_ELSE
6143 && (XEXP (x, 2) == const0_rtx
6144 || XEXP (x, 2) == const1_rtx)
6145 && COMPARISON_P (XEXP (x, 0))
6146 && COMPARISON_P (XEXP (x, 1)))
6147 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6148 INTVAL (XEXP (x, 2)));
6150 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6151 if (GET_CODE (x) == AND
6152 && COMPARISON_P (XEXP (x, 0))
6153 && COMPARISON_P (XEXP (x, 1)))
6154 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6155 DOM_CC_X_AND_Y);
6157 if (GET_CODE (x) == IOR
6158 && COMPARISON_P (XEXP (x, 0))
6159 && COMPARISON_P (XEXP (x, 1)))
6160 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6161 DOM_CC_X_OR_Y);
6163 /* An operation (on Thumb) where we want to test for a single bit.
6164 This is done by shifting that bit up into the top bit of a
6165 scratch register; we can then branch on the sign bit. */
6166 if (TARGET_THUMB
6167 && GET_MODE (x) == SImode
6168 && (op == EQ || op == NE)
6169 && (GET_CODE (x) == ZERO_EXTRACT))
6170 return CC_Nmode;
6172 /* An operation that sets the condition codes as a side-effect; the
6173 V flag is not set correctly, so we can only use comparisons where
6174 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6175 instead.) */
6176 if (GET_MODE (x) == SImode
6177 && y == const0_rtx
6178 && (op == EQ || op == NE || op == LT || op == GE)
6179 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6180 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6181 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6182 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6183 || GET_CODE (x) == LSHIFTRT
6184 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6185 || GET_CODE (x) == ROTATERT
6186 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6187 return CC_NOOVmode;
6189 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6190 return CC_Zmode;
6192 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6193 && GET_CODE (x) == PLUS
6194 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6195 return CC_Cmode;
6197 return CCmode;
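/* The CC_Cmode case above covers the unsigned-overflow idiom: the check
   "if (a + b < a)" compares (plus:SI a b) LTU against a, so it is
   matched here and can be tested with the carry flag alone instead of
   a full comparison.  */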
6200 /* X and Y are two things to compare using CODE. Emit the compare insn and
6201 return the rtx for the CC register in the proper mode. */
6203 rtx
6204 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6206 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6207 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6209 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6210 gen_rtx_COMPARE (mode, x, y)));
6212 return cc_reg;
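/* A typical caller emits the comparison and then branches on the CC
   register; schematically (a sketch, not code from this file -- LABEL
   stands for some code label):

     rtx cc = arm_gen_compare_reg (LT, x, y);
     emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                                  gen_rtx_IF_THEN_ELSE (VOIDmode,
                                      gen_rtx_LT (VOIDmode, cc, const0_rtx),
                                      gen_rtx_LABEL_REF (VOIDmode, LABEL),
                                      pc_rtx)));  */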
6215 /* Generate a sequence of insns that will generate the correct return
6216 address mask depending on the physical architecture that the program
6217 is running on. */
6218 rtx
6219 arm_gen_return_addr_mask (void)
6221 rtx reg = gen_reg_rtx (Pmode);
6223 emit_insn (gen_return_addr_mask (reg));
6224 return reg;
6227 void
6228 arm_reload_in_hi (rtx *operands)
6230 rtx ref = operands[1];
6231 rtx base, scratch;
6232 HOST_WIDE_INT offset = 0;
6234 if (GET_CODE (ref) == SUBREG)
6236 offset = SUBREG_BYTE (ref);
6237 ref = SUBREG_REG (ref);
6240 if (GET_CODE (ref) == REG)
6242 /* We have a pseudo which has been spilt onto the stack; there
6243 are two cases here: the first where there is a simple
6244 stack-slot replacement and a second where the stack-slot is
6245 out of range, or is used as a subreg. */
6246 if (reg_equiv_mem[REGNO (ref)])
6248 ref = reg_equiv_mem[REGNO (ref)];
6249 base = find_replacement (&XEXP (ref, 0));
6251 else
6252 /* The slot is out of range, or was dressed up in a SUBREG. */
6253 base = reg_equiv_address[REGNO (ref)];
6255 else
6256 base = find_replacement (&XEXP (ref, 0));
6258 /* Handle the case where the address is too complex to be offset by 1. */
6259 if (GET_CODE (base) == MINUS
6260 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6262 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6264 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6265 base = base_plus;
6267 else if (GET_CODE (base) == PLUS)
6269 /* The addend must be CONST_INT, or we would have dealt with it above. */
6270 HOST_WIDE_INT hi, lo;
6272 offset += INTVAL (XEXP (base, 1));
6273 base = XEXP (base, 0);
6275 /* Rework the address into a legal sequence of insns. */
6277 /* Valid range for lo is -4095 -> 4095. */
6277 lo = (offset >= 0
6278 ? (offset & 0xfff)
6279 : -((-offset) & 0xfff));
6281 /* Corner case: if lo is the max offset, then we would be out of range
6282 once we have added the additional 1 below, so bump the msb into the
6283 pre-loading insn(s). */
6284 if (lo == 4095)
6285 lo &= 0x7ff;
6287 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6288 ^ (HOST_WIDE_INT) 0x80000000)
6289 - (HOST_WIDE_INT) 0x80000000);
6291 if (hi + lo != offset)
6292 abort ();
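/* A worked example of the split above: offset == 0x12345 gives
   lo == 0x345 and hi == 0x12000.  For the corner case offset == 4095,
   lo is first masked down to 0x7ff, leaving hi == 0x800, and
   hi + lo == 4095 still holds.  The XOR/subtract pair simply
   sign-extends bit 31 of (offset - lo) to the width of HOST_WIDE_INT.  */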
6294 if (hi != 0)
6296 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6298 /* Get the base address; addsi3 knows how to handle constants
6299 that require more than one insn. */
6300 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6301 base = base_plus;
6302 offset = lo;
6306 /* Operands[2] may overlap operands[0] (though it won't overlap
6307 operands[1]); that's why we asked for a DImode reg -- so we can
6308 use the half that does not overlap. */
6309 if (REGNO (operands[2]) == REGNO (operands[0]))
6310 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6311 else
6312 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6314 emit_insn (gen_zero_extendqisi2 (scratch,
6315 gen_rtx_MEM (QImode,
6316 plus_constant (base,
6317 offset))));
6318 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6319 gen_rtx_MEM (QImode,
6320 plus_constant (base,
6321 offset + 1))));
6322 if (!BYTES_BIG_ENDIAN)
6323 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6324 gen_rtx_IOR (SImode,
6325 gen_rtx_ASHIFT
6326 (SImode,
6327 gen_rtx_SUBREG (SImode, operands[0], 0),
6328 GEN_INT (8)),
6329 scratch)));
6330 else
6331 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6332 gen_rtx_IOR (SImode,
6333 gen_rtx_ASHIFT (SImode, scratch,
6334 GEN_INT (8)),
6335 gen_rtx_SUBREG (SImode, operands[0],
6336 0))));
6339 /* Handle storing a half-word to memory during reload by synthesizing as two
6340 byte stores. Take care not to clobber the input values until after we
6341 have moved them somewhere safe. This code assumes that if the DImode
6342 scratch in operands[2] overlaps either the input value or output address
6343 in some way, then that value must die in this insn (we absolutely need
6344 two scratch registers for some corner cases). */
6345 void
6346 arm_reload_out_hi (rtx *operands)
6348 rtx ref = operands[0];
6349 rtx outval = operands[1];
6350 rtx base, scratch;
6351 HOST_WIDE_INT offset = 0;
6353 if (GET_CODE (ref) == SUBREG)
6355 offset = SUBREG_BYTE (ref);
6356 ref = SUBREG_REG (ref);
6359 if (GET_CODE (ref) == REG)
6361 /* We have a pseudo which has been spilt onto the stack; there
6362 are two cases here: the first where there is a simple
6363 stack-slot replacement and a second where the stack-slot is
6364 out of range, or is used as a subreg. */
6365 if (reg_equiv_mem[REGNO (ref)])
6367 ref = reg_equiv_mem[REGNO (ref)];
6368 base = find_replacement (&XEXP (ref, 0));
6370 else
6371 /* The slot is out of range, or was dressed up in a SUBREG. */
6372 base = reg_equiv_address[REGNO (ref)];
6374 else
6375 base = find_replacement (&XEXP (ref, 0));
6377 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6379 /* Handle the case where the address is too complex to be offset by 1. */
6380 if (GET_CODE (base) == MINUS
6381 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6383 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6385 /* Be careful not to destroy OUTVAL. */
6386 if (reg_overlap_mentioned_p (base_plus, outval))
6388 /* Updating base_plus might destroy outval, see if we can
6389 swap the scratch and base_plus. */
6390 if (!reg_overlap_mentioned_p (scratch, outval))
6392 rtx tmp = scratch;
6393 scratch = base_plus;
6394 base_plus = tmp;
6396 else
6398 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6400 /* Be conservative and copy OUTVAL into the scratch now,
6401 this should only be necessary if outval is a subreg
6402 of something larger than a word. */
6403 /* XXX Might this clobber base? I can't see how it can,
6404 since scratch is known to overlap with OUTVAL, and
6405 must be wider than a word. */
6406 emit_insn (gen_movhi (scratch_hi, outval));
6407 outval = scratch_hi;
6411 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6412 base = base_plus;
6414 else if (GET_CODE (base) == PLUS)
6416 /* The addend must be CONST_INT, or we would have dealt with it above. */
6417 HOST_WIDE_INT hi, lo;
6419 offset += INTVAL (XEXP (base, 1));
6420 base = XEXP (base, 0);
6422 /* Rework the address into a legal sequence of insns. */
6423 /* Valid range for lo is -4095 -> 4095. */
6424 lo = (offset >= 0
6425 ? (offset & 0xfff)
6426 : -((-offset) & 0xfff));
6428 /* Corner case: if lo is the max offset, then we would be out of range
6429 once we have added the additional 1 below, so bump the msb into the
6430 pre-loading insn(s). */
6431 if (lo == 4095)
6432 lo &= 0x7ff;
6434 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6435 ^ (HOST_WIDE_INT) 0x80000000)
6436 - (HOST_WIDE_INT) 0x80000000);
6438 if (hi + lo != offset)
6439 abort ();
6441 if (hi != 0)
6443 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6445 /* Be careful not to destroy OUTVAL. */
6446 if (reg_overlap_mentioned_p (base_plus, outval))
6448 /* Updating base_plus might destroy outval, see if we
6449 can swap the scratch and base_plus. */
6450 if (!reg_overlap_mentioned_p (scratch, outval))
6452 rtx tmp = scratch;
6453 scratch = base_plus;
6454 base_plus = tmp;
6456 else
6458 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6460 /* Be conservative and copy outval into scratch now,
6461 this should only be necessary if outval is a
6462 subreg of something larger than a word. */
6463 /* XXX Might this clobber base? I can't see how it
6464 can, since scratch is known to overlap with
6465 outval. */
6466 emit_insn (gen_movhi (scratch_hi, outval));
6467 outval = scratch_hi;
6471 /* Get the base address; addsi3 knows how to handle constants
6472 that require more than one insn. */
6473 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6474 base = base_plus;
6475 offset = lo;
6479 if (BYTES_BIG_ENDIAN)
6481 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6482 plus_constant (base, offset + 1)),
6483 gen_lowpart (QImode, outval)));
6484 emit_insn (gen_lshrsi3 (scratch,
6485 gen_rtx_SUBREG (SImode, outval, 0),
6486 GEN_INT (8)));
6487 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6488 gen_lowpart (QImode, scratch)));
6490 else
6492 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6493 gen_lowpart (QImode, outval)));
6494 emit_insn (gen_lshrsi3 (scratch,
6495 gen_rtx_SUBREG (SImode, outval, 0),
6496 GEN_INT (8)));
6497 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6498 plus_constant (base, offset + 1)),
6499 gen_lowpart (QImode, scratch)));
6503 /* Print a symbolic form of X to the debug file, F. */
6504 static void
6505 arm_print_value (FILE *f, rtx x)
6507 switch (GET_CODE (x))
6509 case CONST_INT:
6510 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6511 return;
6513 case CONST_DOUBLE:
6514 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6515 return;
6517 case CONST_VECTOR:
6519 int i;
6521 fprintf (f, "<");
6522 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6524 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6525 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6526 fputc (',', f);
6528 fprintf (f, ">");
6530 return;
6532 case CONST_STRING:
6533 fprintf (f, "\"%s\"", XSTR (x, 0));
6534 return;
6536 case SYMBOL_REF:
6537 fprintf (f, "`%s'", XSTR (x, 0));
6538 return;
6540 case LABEL_REF:
6541 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6542 return;
6544 case CONST:
6545 arm_print_value (f, XEXP (x, 0));
6546 return;
6548 case PLUS:
6549 arm_print_value (f, XEXP (x, 0));
6550 fprintf (f, "+");
6551 arm_print_value (f, XEXP (x, 1));
6552 return;
6554 case PC:
6555 fprintf (f, "pc");
6556 return;
6558 default:
6559 fprintf (f, "????");
6560 return;
6564 /* Routines for manipulation of the constant pool. */
6566 /* ARM instructions cannot load a large constant directly into a
6567 register; they have to come from a pc-relative load. The constant
6568 must therefore be placed in the addressable range of the
6569 pc-relative load. Depending on the precise pc-relative load
6570 instruction, the range is somewhere between 256 bytes and 4k. This
6571 means that we often have to dump a constant inside a function, and
6572 generate code to branch around it.
6574 It is important to minimize this, since the branches will slow
6575 things down and make the code larger.
6577 Normally we can hide the table after an existing unconditional
6578 branch so that there is no interruption of the flow, but in the
6579 worst case the code looks like this:
6581 ldr rn, L1
6583 b L2
6584 align
6585 L1: .long value
6589 ldr rn, L3
6591 b L4
6592 align
6593 L3: .long value
6597 We fix this by performing a scan after scheduling, which notices
6598 which instructions need to have their operands fetched from the
6599 constant table and builds the table.
6601 The algorithm starts by building a table of all the constants that
6602 need fixing up and all the natural barriers in the function (places
6603 where a constant table can be dropped without breaking the flow).
6604 For each fixup we note how far the pc-relative replacement will be
6605 able to reach and the offset of the instruction into the function.
6607 Having built the table we then group the fixes together to form
6608 tables that are as large as possible (subject to addressing
6609 constraints) and emit each table of constants after the last
6610 barrier that is within range of all the instructions in the group.
6611 If a group does not contain a barrier, then we forcibly create one
6612 by inserting a jump instruction into the flow. Once the table has
6613 been inserted, the insns are then modified to reference the
6614 relevant entry in the pool.
6616 Possible enhancements to the algorithm (not implemented) are:
6618 1) For some processors and object formats, there may be benefit in
6619 aligning the pools to the start of cache lines; this alignment
6620 would need to be taken into account when calculating addressability
6621 of a pool. */
6623 /* These typedefs are located at the start of this file, so that
6624 they can be used in the prototypes there. This comment is to
6625 remind readers of that fact so that the following structures
6626 can be understood more easily.
6628 typedef struct minipool_node Mnode;
6629 typedef struct minipool_fixup Mfix; */
6631 struct minipool_node
6633 /* Doubly linked chain of entries. */
6634 Mnode * next;
6635 Mnode * prev;
6636 /* The maximum offset into the code at which this entry can be placed. While
6637 pushing fixes for forward references, all entries are sorted in order
6638 of increasing max_address. */
6639 HOST_WIDE_INT max_address;
6640 /* Similarly for an entry inserted for a backwards ref. */
6641 HOST_WIDE_INT min_address;
6642 /* The number of fixes referencing this entry. This can become zero
6643 if we "unpush" an entry. In this case we ignore the entry when we
6644 come to emit the code. */
6645 int refcount;
6646 /* The offset from the start of the minipool. */
6647 HOST_WIDE_INT offset;
6648 /* The value in the table. */
6649 rtx value;
6650 /* The mode of value. */
6651 enum machine_mode mode;
6652 /* The size of the value. With iWMMXt enabled
6653 sizes > 4 also imply an alignment of 8 bytes. */
6654 int fix_size;
6657 struct minipool_fixup
6659 Mfix * next;
6660 rtx insn;
6661 HOST_WIDE_INT address;
6662 rtx * loc;
6663 enum machine_mode mode;
6664 int fix_size;
6665 rtx value;
6666 Mnode * minipool;
6667 HOST_WIDE_INT forwards;
6668 HOST_WIDE_INT backwards;
6671 /* Fixes less than a word need padding out to a word boundary. */
6672 #define MINIPOOL_FIX_SIZE(mode) \
6673 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
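/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (SImode)
   both evaluate to 4, while MINIPOOL_FIX_SIZE (DImode) is 8.  */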
6675 static Mnode * minipool_vector_head;
6676 static Mnode * minipool_vector_tail;
6677 static rtx minipool_vector_label;
6679 /* The linked list of all minipool fixes required for this function. */
6680 Mfix * minipool_fix_head;
6681 Mfix * minipool_fix_tail;
6682 /* The fix entry for the current minipool, once it has been placed. */
6683 Mfix * minipool_barrier;
6685 /* Determines if INSN is the start of a jump table. Returns the end
6686 of the TABLE or NULL_RTX. */
6687 static rtx
6688 is_jump_table (rtx insn)
6690 rtx table;
6692 if (GET_CODE (insn) == JUMP_INSN
6693 && JUMP_LABEL (insn) != NULL
6694 && ((table = next_real_insn (JUMP_LABEL (insn)))
6695 == next_real_insn (insn))
6696 && table != NULL
6697 && GET_CODE (table) == JUMP_INSN
6698 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6699 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6700 return table;
6702 return NULL_RTX;
6705 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6706 #define JUMP_TABLES_IN_TEXT_SECTION 0
6707 #endif
6709 static HOST_WIDE_INT
6710 get_jump_table_size (rtx insn)
6712 /* ADDR_VECs only take room if read-only data goes into the text
6713 section. */
6714 if (JUMP_TABLES_IN_TEXT_SECTION
6715 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6716 || 1
6717 #endif
6720 rtx body = PATTERN (insn);
6721 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6723 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6726 return 0;
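/* For example, an ADDR_DIFF_VEC in HImode with ten entries occupies
   GET_MODE_SIZE (HImode) * 10 == 20 bytes when jump tables are placed
   in the text section, and contributes nothing otherwise.  */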
6729 /* Move a minipool fix MP from its current location to before MAX_MP.
6730 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6731 constraints may need updating. */
6732 static Mnode *
6733 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6734 HOST_WIDE_INT max_address)
6736 /* This should never be true and the code below assumes these are
6737 different. */
6738 if (mp == max_mp)
6739 abort ();
6741 if (max_mp == NULL)
6743 if (max_address < mp->max_address)
6744 mp->max_address = max_address;
6746 else
6748 if (max_address > max_mp->max_address - mp->fix_size)
6749 mp->max_address = max_mp->max_address - mp->fix_size;
6750 else
6751 mp->max_address = max_address;
6753 /* Unlink MP from its current position. Since max_mp is non-null,
6754 mp->prev must be non-null. */
6755 mp->prev->next = mp->next;
6756 if (mp->next != NULL)
6757 mp->next->prev = mp->prev;
6758 else
6759 minipool_vector_tail = mp->prev;
6761 /* Re-insert it before MAX_MP. */
6762 mp->next = max_mp;
6763 mp->prev = max_mp->prev;
6764 max_mp->prev = mp;
6766 if (mp->prev != NULL)
6767 mp->prev->next = mp;
6768 else
6769 minipool_vector_head = mp;
6772 /* Save the new entry. */
6773 max_mp = mp;
6775 /* Scan over the preceding entries and adjust their addresses as
6776 required. */
6777 while (mp->prev != NULL
6778 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6780 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6781 mp = mp->prev;
6784 return max_mp;
6787 /* Add a constant to the minipool for a forward reference. Returns the
6788 node added or NULL if the constant will not fit in this pool. */
6789 static Mnode *
6790 add_minipool_forward_ref (Mfix *fix)
6792 /* If set, max_mp is the first pool_entry that has a lower
6793 constraint than the one we are trying to add. */
6794 Mnode * max_mp = NULL;
6795 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6796 Mnode * mp;
6798 /* If this fix's address is greater than the address of the first
6799 entry, then we can't put the fix in this pool. We subtract the
6800 size of the current fix to ensure that if the table is fully
6801 packed we still have enough room to insert this value by shuffling
6802 the other fixes forwards. */
6803 if (minipool_vector_head
6804 && fix->address >= minipool_vector_head->max_address - fix->fix_size)
6805 return NULL;
6807 /* Scan the pool to see if a constant with the same value has
6808 already been added. While we are doing this, also note the
6809 location where we must insert the constant if it doesn't already
6810 exist. */
6811 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6813 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6814 && fix->mode == mp->mode
6815 && (GET_CODE (fix->value) != CODE_LABEL
6816 || (CODE_LABEL_NUMBER (fix->value)
6817 == CODE_LABEL_NUMBER (mp->value)))
6818 && rtx_equal_p (fix->value, mp->value))
6820 /* More than one fix references this entry. */
6821 mp->refcount++;
6822 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6825 /* Note the insertion point if necessary. */
6826 if (max_mp == NULL
6827 && mp->max_address > max_address)
6828 max_mp = mp;
6830 /* If we are inserting an 8-byte aligned quantity and
6831 we have not already found an insertion point, then
6832 make sure that all such 8-byte aligned quantities are
6833 placed at the start of the pool. */
6834 if (ARM_DOUBLEWORD_ALIGN
6835 && max_mp == NULL
6836 && fix->fix_size == 8
6837 && mp->fix_size != 8)
6839 max_mp = mp;
6840 max_address = mp->max_address;
6844 /* The value is not currently in the minipool, so we need to create
6845 a new entry for it. If MAX_MP is NULL, the entry will be put on
6846 the end of the list since the placement is less constrained than
6847 any existing entry. Otherwise, we insert the new fix before
6848 MAX_MP and, if necessary, adjust the constraints on the other
6849 entries. */
6850 mp = xmalloc (sizeof (* mp));
6851 mp->fix_size = fix->fix_size;
6852 mp->mode = fix->mode;
6853 mp->value = fix->value;
6854 mp->refcount = 1;
6855 /* Not yet required for a backwards ref. */
6856 mp->min_address = -65536;
6858 if (max_mp == NULL)
6860 mp->max_address = max_address;
6861 mp->next = NULL;
6862 mp->prev = minipool_vector_tail;
6864 if (mp->prev == NULL)
6866 minipool_vector_head = mp;
6867 minipool_vector_label = gen_label_rtx ();
6869 else
6870 mp->prev->next = mp;
6872 minipool_vector_tail = mp;
6874 else
6876 if (max_address > max_mp->max_address - mp->fix_size)
6877 mp->max_address = max_mp->max_address - mp->fix_size;
6878 else
6879 mp->max_address = max_address;
6881 mp->next = max_mp;
6882 mp->prev = max_mp->prev;
6883 max_mp->prev = mp;
6884 if (mp->prev != NULL)
6885 mp->prev->next = mp;
6886 else
6887 minipool_vector_head = mp;
6890 /* Save the new entry. */
6891 max_mp = mp;
6893 /* Scan over the preceding entries and adjust their addresses as
6894 required. */
6895 while (mp->prev != NULL
6896 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6898 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6899 mp = mp->prev;
6902 return max_mp;
6905 static Mnode *
6906 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6907 HOST_WIDE_INT min_address)
6909 HOST_WIDE_INT offset;
6911 /* This should never be true, and the code below assumes these are
6912 different. */
6913 if (mp == min_mp)
6914 abort ();
6916 if (min_mp == NULL)
6918 if (min_address > mp->min_address)
6919 mp->min_address = min_address;
6921 else
6923 /* We will adjust this below if it is too loose. */
6924 mp->min_address = min_address;
6926 /* Unlink MP from its current position. Since min_mp is non-null,
6927 mp->next must be non-null. */
6928 mp->next->prev = mp->prev;
6929 if (mp->prev != NULL)
6930 mp->prev->next = mp->next;
6931 else
6932 minipool_vector_head = mp->next;
6934 /* Reinsert it after MIN_MP. */
6935 mp->prev = min_mp;
6936 mp->next = min_mp->next;
6937 min_mp->next = mp;
6938 if (mp->next != NULL)
6939 mp->next->prev = mp;
6940 else
6941 minipool_vector_tail = mp;
6944 min_mp = mp;
6946 offset = 0;
6947 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6949 mp->offset = offset;
6950 if (mp->refcount > 0)
6951 offset += mp->fix_size;
6953 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6954 mp->next->min_address = mp->min_address + mp->fix_size;
6957 return min_mp;
6960 /* Add a constant to the minipool for a backward reference. Returns the
6961 node added or NULL if the constant will not fit in this pool.
6963 Note that the code for insertion for a backwards reference can be
6964 somewhat confusing because the calculated offsets for each fix do
6965 not take into account the size of the pool (which is still under
6966 construction). */
6967 static Mnode *
6968 add_minipool_backward_ref (Mfix *fix)
6970 /* If set, min_mp is the last pool_entry that has a lower constraint
6971 than the one we are trying to add. */
6972 Mnode *min_mp = NULL;
6973 /* This can be negative, since it is only a constraint. */
6974 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6975 Mnode *mp;
6977 /* If we can't reach the current pool from this insn, or if we can't
6978 insert this entry at the end of the pool without pushing other
6979 fixes out of range, then we don't try. This ensures that we
6980 can't fail later on. */
6981 if (min_address >= minipool_barrier->address
6982 || (minipool_vector_tail->min_address + fix->fix_size
6983 >= minipool_barrier->address))
6984 return NULL;
6986 /* Scan the pool to see if a constant with the same value has
6987 already been added. While we are doing this, also note the
6988 location where we must insert the constant if it doesn't already
6989 exist. */
6990 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6992 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6993 && fix->mode == mp->mode
6994 && (GET_CODE (fix->value) != CODE_LABEL
6995 || (CODE_LABEL_NUMBER (fix->value)
6996 == CODE_LABEL_NUMBER (mp->value)))
6997 && rtx_equal_p (fix->value, mp->value)
6998 /* Check that there is enough slack to move this entry to the
6999 end of the table (this is conservative). */
7000 && (mp->max_address
7001 > (minipool_barrier->address
7002 + minipool_vector_tail->offset
7003 + minipool_vector_tail->fix_size)))
7005 mp->refcount++;
7006 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7009 if (min_mp != NULL)
7010 mp->min_address += fix->fix_size;
7011 else
7013 /* Note the insertion point if necessary. */
7014 if (mp->min_address < min_address)
7016 /* For now, we do not allow nodes that require 8-byte alignment
7017 to be inserted anywhere but at the start of the pool. */
7018 if (ARM_DOUBLEWORD_ALIGN
7019 && fix->fix_size == 8 && mp->fix_size != 8)
7020 return NULL;
7021 else
7022 min_mp = mp;
7024 else if (mp->max_address
7025 < minipool_barrier->address + mp->offset + fix->fix_size)
7027 /* Inserting before this entry would push the fix beyond
7028 its maximum address (which can happen if we have
7029 re-located a forwards fix); force the new fix to come
7030 after it. */
7031 min_mp = mp;
7032 min_address = mp->min_address + fix->fix_size;
7034 /* If we are inserting an 8-byte aligned quantity and
7035 we have not already found an insertion point, then
7036 make sure that all such 8-byte aligned quantities are
7037 placed at the start of the pool. */
7038 else if (ARM_DOUBLEWORD_ALIGN
7039 && min_mp == NULL
7040 && fix->fix_size == 8
7041 && mp->fix_size < 8)
7043 min_mp = mp;
7044 min_address = mp->min_address + fix->fix_size;
7049 /* We need to create a new entry. */
7050 mp = xmalloc (sizeof (* mp));
7051 mp->fix_size = fix->fix_size;
7052 mp->mode = fix->mode;
7053 mp->value = fix->value;
7054 mp->refcount = 1;
7055 mp->max_address = minipool_barrier->address + 65536;
7057 mp->min_address = min_address;
7059 if (min_mp == NULL)
7061 mp->prev = NULL;
7062 mp->next = minipool_vector_head;
7064 if (mp->next == NULL)
7066 minipool_vector_tail = mp;
7067 minipool_vector_label = gen_label_rtx ();
7069 else
7070 mp->next->prev = mp;
7072 minipool_vector_head = mp;
7074 else
7076 mp->next = min_mp->next;
7077 mp->prev = min_mp;
7078 min_mp->next = mp;
7080 if (mp->next != NULL)
7081 mp->next->prev = mp;
7082 else
7083 minipool_vector_tail = mp;
7086 /* Save the new entry. */
7087 min_mp = mp;
7089 if (mp->prev)
7090 mp = mp->prev;
7091 else
7092 mp->offset = 0;
7094 /* Scan over the following entries and adjust their offsets. */
7095 while (mp->next != NULL)
7097 if (mp->next->min_address < mp->min_address + mp->fix_size)
7098 mp->next->min_address = mp->min_address + mp->fix_size;
7100 if (mp->refcount)
7101 mp->next->offset = mp->offset + mp->fix_size;
7102 else
7103 mp->next->offset = mp->offset;
7105 mp = mp->next;
7108 return min_mp;
7111 static void
7112 assign_minipool_offsets (Mfix *barrier)
7114 HOST_WIDE_INT offset = 0;
7115 Mnode *mp;
7117 minipool_barrier = barrier;
7119 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7121 mp->offset = offset;
7123 if (mp->refcount > 0)
7124 offset += mp->fix_size;
7128 /* Output the literal table. */
7129 static void
7130 dump_minipool (rtx scan)
7132 Mnode * mp;
7133 Mnode * nmp;
7134 int align64 = 0;
7136 if (ARM_DOUBLEWORD_ALIGN)
7137 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7138 if (mp->refcount > 0 && mp->fix_size == 8)
7140 align64 = 1;
7141 break;
7144 if (dump_file)
7145 fprintf (dump_file,
7146 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7147 INSN_UID (scan), (long) minipool_barrier->address, align64 ? 8 : 4);
7149 scan = emit_label_after (gen_label_rtx (), scan);
7150 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7151 scan = emit_label_after (minipool_vector_label, scan);
7153 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7155 if (mp->refcount > 0)
7157 if (dump_file)
7159 fprintf (dump_file,
7160 ";; Offset %u, min %ld, max %ld ",
7161 (unsigned) mp->offset, (long) mp->min_address,
7162 (long) mp->max_address);
7163 arm_print_value (dump_file, mp->value);
7164 fputc ('\n', dump_file);
7167 switch (mp->fix_size)
7169 #ifdef HAVE_consttable_1
7170 case 1:
7171 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7172 break;
7174 #endif
7175 #ifdef HAVE_consttable_2
7176 case 2:
7177 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7178 break;
7180 #endif
7181 #ifdef HAVE_consttable_4
7182 case 4:
7183 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7184 break;
7186 #endif
7187 #ifdef HAVE_consttable_8
7188 case 8:
7189 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7190 break;
7192 #endif
7193 default:
7194 abort ();
7195 break;
7199 nmp = mp->next;
7200 free (mp);
7203 minipool_vector_head = minipool_vector_tail = NULL;
7204 scan = emit_insn_after (gen_consttable_end (), scan);
7205 scan = emit_barrier_after (scan);
7208 /* Return the cost of forcibly inserting a barrier after INSN. */
7209 static int
7210 arm_barrier_cost (rtx insn)
7212 /* Basing the location of the pool on the loop depth is preferable,
7213 but at the moment, the basic block information seems to be
7214 corrupt by this stage of the compilation. */
7215 int base_cost = 50;
7216 rtx next = next_nonnote_insn (insn);
7218 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7219 base_cost -= 20;
7221 switch (GET_CODE (insn))
7223 case CODE_LABEL:
7224 /* It will always be better to place the table before the label, rather
7225 than after it. */
7226 return 50;
7228 case INSN:
7229 case CALL_INSN:
7230 return base_cost;
7232 case JUMP_INSN:
7233 return base_cost - 10;
7235 default:
7236 return base_cost + 10;
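/* For example, a JUMP_INSN immediately followed by a CODE_LABEL costs
   50 - 20 - 10 == 20, one of the cheapest placements, whereas placing
   the barrier at a CODE_LABEL itself always costs 50.  */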
7240 /* Find the best place in the insn stream in the range
7241 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7242 Create the barrier by inserting a jump and add a new fix entry for
7243 it. */
7244 static Mfix *
7245 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7247 HOST_WIDE_INT count = 0;
7248 rtx barrier;
7249 rtx from = fix->insn;
7250 rtx selected = from;
7251 int selected_cost;
7252 HOST_WIDE_INT selected_address;
7253 Mfix * new_fix;
7254 HOST_WIDE_INT max_count = max_address - fix->address;
7255 rtx label = gen_label_rtx ();
7257 selected_cost = arm_barrier_cost (from);
7258 selected_address = fix->address;
7260 while (from && count < max_count)
7262 rtx tmp;
7263 int new_cost;
7265 /* This code shouldn't have been called if there was a natural barrier
7266 within range. */
7267 if (GET_CODE (from) == BARRIER)
7268 abort ();
7270 /* Count the length of this insn. */
7271 count += get_attr_length (from);
7273 /* If there is a jump table, add its length. */
7274 tmp = is_jump_table (from);
7275 if (tmp != NULL)
7277 count += get_jump_table_size (tmp);
7279 /* Jump tables aren't in a basic block, so base the cost on
7280 the dispatch insn. If we select this location, we will
7281 still put the pool after the table. */
7282 new_cost = arm_barrier_cost (from);
7284 if (count < max_count && new_cost <= selected_cost)
7286 selected = tmp;
7287 selected_cost = new_cost;
7288 selected_address = fix->address + count;
7291 /* Continue after the dispatch table. */
7292 from = NEXT_INSN (tmp);
7293 continue;
7296 new_cost = arm_barrier_cost (from);
7298 if (count < max_count && new_cost <= selected_cost)
7300 selected = from;
7301 selected_cost = new_cost;
7302 selected_address = fix->address + count;
7305 from = NEXT_INSN (from);
7308 /* Create a new JUMP_INSN that branches around a barrier. */
7309 from = emit_jump_insn_after (gen_jump (label), selected);
7310 JUMP_LABEL (from) = label;
7311 barrier = emit_barrier_after (from);
7312 emit_label_after (label, barrier);
7314 /* Create a minipool barrier entry for the new barrier. */
7315 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7316 new_fix->insn = barrier;
7317 new_fix->address = selected_address;
7318 new_fix->next = fix->next;
7319 fix->next = new_fix;
7321 return new_fix;
7324 /* Record that there is a natural barrier in the insn stream at
7325 ADDRESS. */
7326 static void
7327 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7329 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7331 fix->insn = insn;
7332 fix->address = address;
7334 fix->next = NULL;
7335 if (minipool_fix_head != NULL)
7336 minipool_fix_tail->next = fix;
7337 else
7338 minipool_fix_head = fix;
7340 minipool_fix_tail = fix;
7343 /* Record INSN, which will need fixing up to load a value from the
7344 minipool. ADDRESS is the offset of the insn since the start of the
7345 function; LOC is a pointer to the part of the insn which requires
7346 fixing; VALUE is the constant that must be loaded, which is of type
7347 MODE. */
7348 static void
7349 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7350 enum machine_mode mode, rtx value)
7352 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7354 #ifdef AOF_ASSEMBLER
7355 /* PIC symbol references need to be converted into offsets into the
7356 based area. */
7357 /* XXX This shouldn't be done here. */
7358 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7359 value = aof_pic_entry (value);
7360 #endif /* AOF_ASSEMBLER */
7362 fix->insn = insn;
7363 fix->address = address;
7364 fix->loc = loc;
7365 fix->mode = mode;
7366 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7367 fix->value = value;
7368 fix->forwards = get_attr_pool_range (insn);
7369 fix->backwards = get_attr_neg_pool_range (insn);
7370 fix->minipool = NULL;
7372 /* If an insn doesn't have a range defined for it, then it isn't
7373 expecting to be reworked by this code. Better to abort now than
7374 to generate duff assembly code. */
7375 if (fix->forwards == 0 && fix->backwards == 0)
7376 abort ();
7378 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7379 So there might be an empty word before the start of the pool.
7380 Hence we reduce the forward range by 4 to allow for this
7381 possibility. */
7382 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7383 fix->forwards -= 4;
7385 if (dump_file)
7387 fprintf (dump_file,
7388 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7389 GET_MODE_NAME (mode),
7390 INSN_UID (insn), (unsigned long) address,
7391 -1 * (long)fix->backwards, (long)fix->forwards);
7392 arm_print_value (dump_file, fix->value);
7393 fprintf (dump_file, "\n");
7396 /* Add it to the chain of fixes. */
7397 fix->next = NULL;
7399 if (minipool_fix_head != NULL)
7400 minipool_fix_tail->next = fix;
7401 else
7402 minipool_fix_head = fix;
7404 minipool_fix_tail = fix;
7407 /* Return the cost of synthesizing a 64-bit constant VAL inline.
7408 Returns the number of insns needed, or 99 if we don't know how to
7409 do it. */
7410 int
7411 arm_const_double_inline_cost (rtx val)
7413 rtx lowpart, highpart;
7414 enum machine_mode mode;
7416 mode = GET_MODE (val);
7418 if (mode == VOIDmode)
7419 mode = DImode;
7421 gcc_assert (GET_MODE_SIZE (mode) == 8);
7423 lowpart = gen_lowpart (SImode, val);
7424 highpart = gen_highpart_mode (SImode, mode, val);
7426 gcc_assert (GET_CODE (lowpart) == CONST_INT);
7427 gcc_assert (GET_CODE (highpart) == CONST_INT);
7429 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
7430 NULL_RTX, NULL_RTX, 0, 0)
7431 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
7432 NULL_RTX, NULL_RTX, 0, 0));
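/* For example, the constant 0x0000000100000001 costs 2: each 32-bit
   half is the immediate 1, which arm_gen_constant can set with a
   single insn (assuming it charges one insn per MOV-able half).  */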
7435 /* Return true if it is worthwhile to split a 64-bit constant into two
7436 32-bit operations. This is the case if optimizing for size, or
7437 if we have load delay slots, or if one 32-bit part can be done with
7438 a single data operation. */
7439 bool
7440 arm_const_double_by_parts (rtx val)
7442 enum machine_mode mode = GET_MODE (val);
7443 rtx part;
7445 if (optimize_size || arm_ld_sched)
7446 return true;
7448 if (mode == VOIDmode)
7449 mode = DImode;
7451 part = gen_highpart_mode (SImode, mode, val);
7453 gcc_assert (GET_CODE (part) == CONST_INT);
7455 if (const_ok_for_arm (INTVAL (part))
7456 || const_ok_for_arm (~INTVAL (part)))
7457 return true;
7459 part = gen_lowpart (SImode, val);
7461 gcc_assert (GET_CODE (part) == CONST_INT);
7463 if (const_ok_for_arm (INTVAL (part))
7464 || const_ok_for_arm (~INTVAL (part)))
7465 return true;
7467 return false;
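/* For example, a 64-bit constant whose high word is 0x12 is worth
   splitting whatever its low word holds: 0x12 is a valid ARM
   immediate, so that half is a single data operation.  */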
7470 /* Scan INSN and note any of its operands that need fixing.
7471 If DO_PUSHES is false we do not actually push any of the fixups
7472 needed. The function returns TRUE if any fixups were needed/pushed.
7473 This is used by arm_memory_load_p() which needs to know about loads
7474 of constants that will be converted into minipool loads. */
7475 static bool
7476 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7478 bool result = false;
7479 int opno;
7481 extract_insn (insn);
7483 if (!constrain_operands (1))
7484 fatal_insn_not_found (insn);
7486 if (recog_data.n_alternatives == 0)
7487 return false;
7489 /* Fill in recog_op_alt with information about the constraints of
7490 this insn. */
7491 preprocess_constraints ();
7493 for (opno = 0; opno < recog_data.n_operands; opno++)
7495 /* Things we need to fix can only occur in inputs. */
7496 if (recog_data.operand_type[opno] != OP_IN)
7497 continue;
7499 /* If this alternative is a memory reference, then any mention
7500 of constants in this alternative is really to fool reload
7501 into allowing us to accept one there. We need to fix them up
7502 now so that we output the right code. */
7503 if (recog_op_alt[opno][which_alternative].memory_ok)
7505 rtx op = recog_data.operand[opno];
7507 if (CONSTANT_P (op))
7509 if (do_pushes)
7510 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7511 recog_data.operand_mode[opno], op);
7512 result = true;
7514 else if (GET_CODE (op) == MEM
7515 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7516 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7518 if (do_pushes)
7520 rtx cop = avoid_constant_pool_reference (op);
7522 /* Casting the address of something to a mode narrower
7523 than a word can cause avoid_constant_pool_reference()
7524 to return the pool reference itself. That's no good to
7525 us here. Let's just hope that we can use the
7526 constant pool value directly. */
7527 if (op == cop)
7528 cop = get_pool_constant (XEXP (op, 0));
7530 push_minipool_fix (insn, address,
7531 recog_data.operand_loc[opno],
7532 recog_data.operand_mode[opno], cop);
7535 result = true;
7540 return result;
7543 /* GCC puts the pool in the wrong place for ARM, since we can only
7544 load addresses a limited distance around the pc. We do some
7545 special munging to move the constant pool values to the correct
7546 point in the code. */
7547 static void
7548 arm_reorg (void)
7550 rtx insn;
7551 HOST_WIDE_INT address = 0;
7552 Mfix * fix;
7554 minipool_fix_head = minipool_fix_tail = NULL;
7556 /* The first insn must always be a note, or the code below won't
7557 scan it properly. */
7558 insn = get_insns ();
7559 if (GET_CODE (insn) != NOTE)
7560 abort ();
7562 /* Scan all the insns and record the operands that will need fixing. */
7563 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7565 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7566 && (arm_cirrus_insn_p (insn)
7567 || GET_CODE (insn) == JUMP_INSN
7568 || arm_memory_load_p (insn)))
7569 cirrus_reorg (insn);
7571 if (GET_CODE (insn) == BARRIER)
7572 push_minipool_barrier (insn, address);
7573 else if (INSN_P (insn))
7575 rtx table;
7577 note_invalid_constants (insn, address, true);
7578 address += get_attr_length (insn);
7580 /* If the insn is a vector jump, add the size of the table
7581 and skip the table. */
7582 if ((table = is_jump_table (insn)) != NULL)
7584 address += get_jump_table_size (table);
7585 insn = table;
7590 fix = minipool_fix_head;
7592 /* Now scan the fixups and perform the required changes. */
7593 while (fix)
7595 Mfix * ftmp;
7596 Mfix * fdel;
7597 Mfix * last_added_fix;
7598 Mfix * last_barrier = NULL;
7599 Mfix * this_fix;
7601 /* Skip any further barriers before the next fix. */
7602 while (fix && GET_CODE (fix->insn) == BARRIER)
7603 fix = fix->next;
7605 /* No more fixes. */
7606 if (fix == NULL)
7607 break;
7609 last_added_fix = NULL;
7611 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7613 if (GET_CODE (ftmp->insn) == BARRIER)
7615 if (ftmp->address >= minipool_vector_head->max_address)
7616 break;
7618 last_barrier = ftmp;
7620 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7621 break;
7623 last_added_fix = ftmp; /* Keep track of the last fix added. */
7626 /* If we found a barrier, drop back to that; any fixes that we
7627 could have reached but come after the barrier will now go in
7628 the next minipool. */
7629 if (last_barrier != NULL)
7631 /* Reduce the refcount for those fixes that won't go into this
7632 pool after all. */
7633 for (fdel = last_barrier->next;
7634 fdel && fdel != ftmp;
7635 fdel = fdel->next)
7637 fdel->minipool->refcount--;
7638 fdel->minipool = NULL;
7641 ftmp = last_barrier;
7643 else
7645 /* ftmp is the first fix that we can't fit into this pool and
7646 there are no natural barriers that we could use. Insert a
7647 new barrier in the code somewhere between the previous
7648 fix and this one, and arrange to jump around it. */
7649 HOST_WIDE_INT max_address;
7651 /* The last item on the list of fixes must be a barrier, so
7652 we can never run off the end of the list of fixes without
7653 last_barrier being set. */
7654 if (ftmp == NULL)
7655 abort ();
7657 max_address = minipool_vector_head->max_address;
7658 /* Check that there isn't another fix that is in range that
7659 we couldn't fit into this pool because the pool was
7660 already too large: we need to put the pool before such an
7661 instruction. */
7662 if (ftmp->address < max_address)
7663 max_address = ftmp->address;
7665 last_barrier = create_fix_barrier (last_added_fix, max_address);
7668 assign_minipool_offsets (last_barrier);
7670 while (ftmp)
7672 if (GET_CODE (ftmp->insn) != BARRIER
7673 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7674 == NULL))
7675 break;
7677 ftmp = ftmp->next;
7680 /* Scan over the fixes we have identified for this pool, fixing them
7681 up and adding the constants to the pool itself. */
7682 for (this_fix = fix; this_fix && ftmp != this_fix;
7683 this_fix = this_fix->next)
7684 if (GET_CODE (this_fix->insn) != BARRIER)
7686 rtx addr
7687 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7688 minipool_vector_label),
7689 this_fix->minipool->offset);
7690 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7693 dump_minipool (last_barrier->insn);
7694 fix = ftmp;
7697 /* From now on we must synthesize any constants that we can't handle
7698 directly. This can happen if the RTL gets split during final
7699 instruction generation. */
7700 after_arm_reorg = 1;
7702 /* Free the minipool memory. */
7703 obstack_free (&minipool_obstack, minipool_startobj);
7706 /* Routines to output assembly language. */
7708 /* If the rtx is one of the valid FPA immediate constants, return the
7709 string of the number. In this way we can ensure that valid double
7710 constants are generated even when cross-compiling. */
7711 const char *
7712 fp_immediate_constant (rtx x)
7714 REAL_VALUE_TYPE r;
7715 int i;
7717 if (!fp_consts_inited)
7718 init_fp_table ();
7720 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7721 for (i = 0; i < 8; i++)
7722 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7723 return strings_fp[i];
7725 abort ();
7728 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7729 static const char *
7730 fp_const_from_val (REAL_VALUE_TYPE *r)
7732 int i;
7734 if (!fp_consts_inited)
7735 init_fp_table ();
7737 for (i = 0; i < 8; i++)
7738 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7739 return strings_fp[i];
7741 abort ();
7744 /* Output the operands of an LDM/STM instruction to STREAM.
7745 MASK is the ARM register set mask, of which only bits 0-15 are important.
7746 REG is the base register, either the frame pointer or the stack pointer;
7747 INSTR is the possibly suffixed load or store instruction. */
7749 static void
7750 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
7751 unsigned long mask)
7753 unsigned i;
7754 bool not_first = FALSE;
7756 fputc ('\t', stream);
7757 asm_fprintf (stream, instr, reg);
7758 fputs (", {", stream);
7760 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7761 if (mask & (1 << i))
7763 if (not_first)
7764 fprintf (stream, ", ");
7766 asm_fprintf (stream, "%r", i);
7767 not_first = TRUE;
7770 fprintf (stream, "}\n");
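/* For example, a MASK with bits 4, 5 and 14 set prints something like
   "stmfd sp!, {r4, r5, lr}" (assuming INSTR is "stmfd\t%r!" and REG is
   the stack pointer).  */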
7774 /* Output a FLDMX instruction to STREAM.
7775 BASE is the register containing the address.
7776 REG and COUNT specify the register range.
7777 Extra registers may be added to avoid hardware bugs. */
7779 static void
7780 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7782 int i;
7784 /* Work around the ARM10 VFPr1 bug. */
7785 if (count == 2 && !arm_arch6)
7787 if (reg == 15)
7788 reg--;
7789 count++;
7792 fputc ('\t', stream);
7793 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7795 for (i = reg; i < reg + count; i++)
7797 if (i > reg)
7798 fputs (", ", stream);
7799 asm_fprintf (stream, "d%d", i);
7801 fputs ("}\n", stream);
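/* For example, REG == 8 and COUNT == 2 on a pre-ARMv6 core triggers
   the workaround, bumping COUNT to 3, so with BASE == the stack
   pointer this emits "fldmfdx sp!, {d8, d9, d10}".  */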
7806 /* Output the assembly for a store multiple. */
7808 const char *
7809 vfp_output_fstmx (rtx * operands)
7811 char pattern[100];
7812 int p;
7813 int base;
7814 int i;
7816 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7817 p = strlen (pattern);
7819 if (GET_CODE (operands[1]) != REG)
7820 abort ();
7822 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7823 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7825 p += sprintf (&pattern[p], ", d%d", base + i);
7827 strcpy (&pattern[p], "}");
7829 output_asm_insn (pattern, operands);
7830 return "";
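/* For example, if operands[1] is d8 and the PARALLEL describes three
   register pairs, the emitted pattern becomes
   "fstmfdx sp!, {d8, d9, d10}" (assuming %m0 prints the base address
   register and %P1 the first double-precision register).  */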
7834 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7835 number of bytes pushed. */
7837 static int
7838 vfp_emit_fstmx (int base_reg, int count)
7840 rtx par;
7841 rtx dwarf;
7842 rtx tmp, reg;
7843 int i;
7845 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7846 register pairs are stored by a store multiple insn. We avoid this
7847 by pushing an extra pair. */
7848 if (count == 2 && !arm_arch6)
7850 if (base_reg == LAST_VFP_REGNUM - 3)
7851 base_reg -= 2;
7852 count++;
7855 /* ??? The frame layout is implementation defined. We describe
7856 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7857 We really need some way of representing the whole block so that the
7858 unwinder can figure it out at runtime. */
7859 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7860 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7862 reg = gen_rtx_REG (DFmode, base_reg);
7863 base_reg += 2;
7865 XVECEXP (par, 0, 0)
7866 = gen_rtx_SET (VOIDmode,
7867 gen_rtx_MEM (BLKmode,
7868 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7869 gen_rtx_UNSPEC (BLKmode,
7870 gen_rtvec (1, reg),
7871 UNSPEC_PUSH_MULT));
7873 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7874 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7875 GEN_INT (-(count * 8 + 4))));
7876 RTX_FRAME_RELATED_P (tmp) = 1;
7877 XVECEXP (dwarf, 0, 0) = tmp;
7879 tmp = gen_rtx_SET (VOIDmode,
7880 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7881 reg);
7882 RTX_FRAME_RELATED_P (tmp) = 1;
7883 XVECEXP (dwarf, 0, 1) = tmp;
7885 for (i = 1; i < count; i++)
7887 reg = gen_rtx_REG (DFmode, base_reg);
7888 base_reg += 2;
7889 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7891 tmp = gen_rtx_SET (VOIDmode,
7892 gen_rtx_MEM (DFmode,
7893 gen_rtx_PLUS (SImode,
7894 stack_pointer_rtx,
7895 GEN_INT (i * 8))),
7896 reg);
7897 RTX_FRAME_RELATED_P (tmp) = 1;
7898 XVECEXP (dwarf, 0, i + 1) = tmp;
7901 par = emit_insn (par);
7902 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7903 REG_NOTES (par));
7904 RTX_FRAME_RELATED_P (par) = 1;
7906 return count * 8 + 4;
7910 /* Output a 'call' insn. */
7911 const char *
7912 output_call (rtx *operands)
7914 if (arm_arch5)
7915 abort (); /* Patterns should call blx <reg> directly. */
7917 /* Handle calls to lr using ip (which may be clobbered in the subroutine anyway). */
7918 if (REGNO (operands[0]) == LR_REGNUM)
7920 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7921 output_asm_insn ("mov%?\t%0, %|lr", operands);
7924 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7926 if (TARGET_INTERWORK || arm_arch4t)
7927 output_asm_insn ("bx%?\t%0", operands);
7928 else
7929 output_asm_insn ("mov%?\t%|pc, %0", operands);
7931 return "";
7934 /* Output a 'call' insn that is a reference in memory. */
7935 const char *
7936 output_call_mem (rtx *operands)
7938 if (TARGET_INTERWORK && !arm_arch5)
7940 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7941 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7942 output_asm_insn ("bx%?\t%|ip", operands);
7944 else if (regno_use_in (LR_REGNUM, operands[0]))
7946 /* LR is used in the memory address. We load the address in the
7947 first instruction. It's safe to use IP as the target of the
7948 load since the call will kill it anyway. */
7949 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7950 if (arm_arch5)
7951 output_asm_insn ("blx%?\t%|ip", operands);
7952 else
7954 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7955 if (arm_arch4t)
7956 output_asm_insn ("bx%?\t%|ip", operands);
7957 else
7958 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7961 else
7963 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7964 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7967 return "";
7971 /* Output a move from arm registers to an fpa register.
7972 OPERANDS[0] is an fpa register.
7973 OPERANDS[1] is the first register of an arm register pair. */
7974 const char *
7975 output_mov_long_double_fpa_from_arm (rtx *operands)
7977 int arm_reg0 = REGNO (operands[1]);
7978 rtx ops[3];
7980 if (arm_reg0 == IP_REGNUM)
7981 abort ();
7983 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7984 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7985 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7987 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7988 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7990 return "";
7993 /* Output a move from an fpa register to arm registers.
7994 OPERANDS[0] is the first register of an arm register pair.
7995 OPERANDS[1] is an fpa register. */
7996 const char *
7997 output_mov_long_double_arm_from_fpa (rtx *operands)
7999 int arm_reg0 = REGNO (operands[0]);
8000 rtx ops[3];
8002 if (arm_reg0 == IP_REGNUM)
8003 abort ();
8005 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8006 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8007 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8009 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8010 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8011 return "";
8014 /* Output a move from arm registers to arm registers of a long double.
8015 OPERANDS[0] is the destination.
8016 OPERANDS[1] is the source. */
8017 const char *
8018 output_mov_long_double_arm_from_arm (rtx *operands)
8020 /* We have to be careful here because the two might overlap. */
8021 int dest_start = REGNO (operands[0]);
8022 int src_start = REGNO (operands[1]);
8023 rtx ops[2];
8024 int i;
8026 if (dest_start < src_start)
8028 for (i = 0; i < 3; i++)
8030 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8031 ops[1] = gen_rtx_REG (SImode, src_start + i);
8032 output_asm_insn ("mov%?\t%0, %1", ops);
8035 else
8037 for (i = 2; i >= 0; i--)
8039 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8040 ops[1] = gen_rtx_REG (SImode, src_start + i);
8041 output_asm_insn ("mov%?\t%0, %1", ops);
8045 return "";
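/* A minimal standalone sketch of the overlap rule used above (the
   helper name and the int-array model are hypothetical, illustration
   only): copy ascending when the destination starts below the source
   and descending otherwise, exactly as memmove must.  */
#if 0 /* Illustrative sketch; not part of the compiler.  */
static void
copy_overlapping_block (int *regs, int dest, int src, int n)
{
  int i;

  if (dest < src)
    for (i = 0; i < n; i++)		/* Ascending is safe here.  */
      regs[dest + i] = regs[src + i];
  else
    for (i = n - 1; i >= 0; i--)	/* Descending is safe here.  */
      regs[dest + i] = regs[src + i];
}
#endif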
8049 /* Output a move from arm registers to an fpa register.
8050 OPERANDS[0] is an fpa register.
8051 OPERANDS[1] is the first register of an arm register pair. */
8052 const char *
8053 output_mov_double_fpa_from_arm (rtx *operands)
8055 int arm_reg0 = REGNO (operands[1]);
8056 rtx ops[2];
8058 if (arm_reg0 == IP_REGNUM)
8059 abort ();
8061 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8062 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8063 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8064 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8065 return "";
8068 /* Output a move from an fpa register to arm registers.
8069 OPERANDS[0] is the first register of an arm register pair.
8070 OPERANDS[1] is an fpa register. */
8071 const char *
8072 output_mov_double_arm_from_fpa (rtx *operands)
8074 int arm_reg0 = REGNO (operands[0]);
8075 rtx ops[2];
8077 if (arm_reg0 == IP_REGNUM)
8078 abort ();
8080 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8081 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8082 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8083 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8084 return "";
8087 /* Output a move between double words.
8088 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8089 or MEM<-REG, and all MEMs must be offsettable addresses. */
8090 const char *
8091 output_move_double (rtx *operands)
8093 enum rtx_code code0 = GET_CODE (operands[0]);
8094 enum rtx_code code1 = GET_CODE (operands[1]);
8095 rtx otherops[3];
8097 if (code0 == REG)
8099 int reg0 = REGNO (operands[0]);
8101 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8103 if (code1 == MEM)
8105 switch (GET_CODE (XEXP (operands[1], 0)))
8107 case REG:
8108 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8109 break;
8111 case PRE_INC:
8112 if (!TARGET_LDRD)
8113 abort (); /* Should never happen now. */
8114 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8115 break;
8117 case PRE_DEC:
8118 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8119 break;
8121 case POST_INC:
8122 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8123 break;
8125 case POST_DEC:
8126 if (!TARGET_LDRD)
8127 abort (); /* Should never happen now. */
8128 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8129 break;
8131 case PRE_MODIFY:
8132 case POST_MODIFY:
8133 otherops[0] = operands[0];
8134 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8135 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8137 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8139 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8141 /* Registers overlap so split out the increment. */
8142 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8143 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8145 else
8146 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8148 else
8150 /* We only allow constant increments, so this is safe. */
8151 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8153 break;
8155 case LABEL_REF:
8156 case CONST:
8157 output_asm_insn ("adr%?\t%0, %1", operands);
8158 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8159 break;
8161 default:
8162 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8163 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8165 otherops[0] = operands[0];
8166 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8167 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8169 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8171 if (GET_CODE (otherops[2]) == CONST_INT)
8173 switch ((int) INTVAL (otherops[2]))
8175 case -8:
8176 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8177 return "";
8178 case -4:
8179 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8180 return "";
8181 case 4:
8182 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8183 return "";
8186 if (TARGET_LDRD
8187 && (GET_CODE (otherops[2]) == REG
8188 || (GET_CODE (otherops[2]) == CONST_INT
8189 && INTVAL (otherops[2]) > -256
8190 && INTVAL (otherops[2]) < 256)))
8192 if (reg_overlap_mentioned_p (otherops[0],
8193 otherops[2]))
8195 /* Swap base and index registers over to
8196 avoid a conflict. */
8197 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8198 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8201 /* If both registers conflict, it will usually
8202 have been fixed by a splitter. */
8203 if (reg_overlap_mentioned_p (otherops[0],
8204 otherops[2]))
8206 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8207 output_asm_insn ("ldr%?d\t%0, [%1]",
8208 otherops);
8209 return "";
8211 else
8213 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8214 otherops);
8215 return "";
8218 if (GET_CODE (otherops[2]) == CONST_INT)
8220 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8221 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8222 else
8223 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8225 else
8226 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8228 else
8229 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8231 return "ldm%?ia\t%0, %M0";
8233 else
8235 otherops[1] = adjust_address (operands[1], SImode, 4);
8236 /* Take care of overlapping base/data reg. */
8237 if (reg_mentioned_p (operands[0], operands[1]))
8239 output_asm_insn ("ldr%?\t%0, %1", otherops);
8240 output_asm_insn ("ldr%?\t%0, %1", operands);
8242 else
8244 output_asm_insn ("ldr%?\t%0, %1", operands);
8245 output_asm_insn ("ldr%?\t%0, %1", otherops);
8250 else
8251 abort (); /* Constraints should prevent this. */
8253 else if (code0 == MEM && code1 == REG)
8255 if (REGNO (operands[1]) == IP_REGNUM)
8256 abort ();
8258 switch (GET_CODE (XEXP (operands[0], 0)))
8260 case REG:
8261 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8262 break;
8264 case PRE_INC:
8265 if (!TARGET_LDRD)
8266 abort (); /* Should never happen now. */
8267 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8268 break;
8270 case PRE_DEC:
8271 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8272 break;
8274 case POST_INC:
8275 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8276 break;
8278 case POST_DEC:
8279 if (!TARGET_LDRD)
8280 abort (); /* Should never happen now. */
8281 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8282 break;
8284 case PRE_MODIFY:
8285 case POST_MODIFY:
8286 otherops[0] = operands[1];
8287 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8288 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8290 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8291 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8292 else
8293 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8294 break;
8296 case PLUS:
8297 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8298 if (GET_CODE (otherops[2]) == CONST_INT)
8300 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8302 case -8:
8303 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8304 return "";
8306 case -4:
8307 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8308 return "";
8310 case 4:
8311 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8312 return "";
8315 if (TARGET_LDRD
8316 && (GET_CODE (otherops[2]) == REG
8317 || (GET_CODE (otherops[2]) == CONST_INT
8318 && INTVAL (otherops[2]) > -256
8319 && INTVAL (otherops[2]) < 256)))
8321 otherops[0] = operands[1];
8322 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8323 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8324 return "";
8326 /* Fall through */
8328 default:
8329 otherops[0] = adjust_address (operands[0], SImode, 4);
8330 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8331 output_asm_insn ("str%?\t%1, %0", operands);
8332 output_asm_insn ("str%?\t%1, %0", otherops);
8335 else
8336 /* Constraints should prevent this. */
8337 abort ();
8339 return "";
8342 /* Output an ADD r, s, #n where n may be too big for one instruction.
8343 If adding zero to one register, output nothing. */
8344 const char *
8345 output_add_immediate (rtx *operands)
8347 HOST_WIDE_INT n = INTVAL (operands[2]);
8349 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8351 if (n < 0)
8352 output_multi_immediate (operands,
8353 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8354 -n);
8355 else
8356 output_multi_immediate (operands,
8357 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8361 return "";
8364 /* Output a multiple immediate operation.
8365 OPERANDS is the vector of operands referred to in the output patterns.
8366 INSTR1 is the output pattern to use for the first constant.
8367 INSTR2 is the output pattern to use for subsequent constants.
8368 IMMED_OP is the index of the constant slot in OPERANDS.
8369 N is the constant value. */
8370 static const char *
8371 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8372 int immed_op, HOST_WIDE_INT n)
8374 #if HOST_BITS_PER_WIDE_INT > 32
8375 n &= 0xffffffff;
8376 #endif
8378 if (n == 0)
8380 /* Quick and easy output. */
8381 operands[immed_op] = const0_rtx;
8382 output_asm_insn (instr1, operands);
8384 else
8386 int i;
8387 const char * instr = instr1;
8389 /* Note that n is never zero here (which would give no output). */
8390 for (i = 0; i < 32; i += 2)
8392 if (n & (3 << i))
8394 operands[immed_op] = GEN_INT (n & (255 << i));
8395 output_asm_insn (instr, operands);
8396 instr = instr2;
8397 i += 6;
8402 return "";
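/* A hedged standalone sketch (hypothetical helper, not used by the
   compiler) of the splitting rule above: each step peels off a chunk
   of up to eight bits starting at an even bit position, which is what
   a single ARM data-processing immediate can encode.  */
#if 0 /* Illustrative sketch; not part of the compiler.  */
static int
count_immediate_chunks (unsigned long n)
{
  int i, count = 0;

  n &= 0xffffffffUL;			/* Model a 32-bit register.  */
  for (i = 0; i < 32; i += 2)
    if (n & (3UL << i))
      {
	count++;			/* One insn covers bits i .. i+7.  */
	i += 6;				/* Plus the loop's own i += 2.  */
      }
  return count;				/* E.g. 0x00ff00ff yields 2.  */
}
#endif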
8405 /* Return the appropriate ARM instruction for the operation code.
8406 The returned result should not be overwritten. OP is the rtx of the
8407 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8408 was shifted. */
8409 const char *
8410 arithmetic_instr (rtx op, int shift_first_arg)
8412 switch (GET_CODE (op))
8414 case PLUS:
8415 return "add";
8417 case MINUS:
8418 return shift_first_arg ? "rsb" : "sub";
8420 case IOR:
8421 return "orr";
8423 case XOR:
8424 return "eor";
8426 case AND:
8427 return "and";
8429 default:
8430 abort ();
8434 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8435 for the operation code. The returned result should not be overwritten.
8436 OP is the rtx code of the shift.
8437 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8438 constant shift amount otherwise. */
8439 static const char *
8440 shift_op (rtx op, HOST_WIDE_INT *amountp)
8442 const char * mnem;
8443 enum rtx_code code = GET_CODE (op);
8445 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8446 *amountp = -1;
8447 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8448 *amountp = INTVAL (XEXP (op, 1));
8449 else
8450 abort ();
8452 switch (code)
8454 case ASHIFT:
8455 mnem = "asl";
8456 break;
8458 case ASHIFTRT:
8459 mnem = "asr";
8460 break;
8462 case LSHIFTRT:
8463 mnem = "lsr";
8464 break;
8466 case ROTATE:
8467 if (*amountp == -1)
8468 abort ();
8469 *amountp = 32 - *amountp;
8471 /* Fall through. */
8473 case ROTATERT:
8474 mnem = "ror";
8475 break;
8477 case MULT:
8478 /* We never have to worry about the amount being other than a
8479 power of 2, since this case can never be reloaded from a reg. */
8480 if (*amountp != -1)
8481 *amountp = int_log2 (*amountp);
8482 else
8483 abort ();
8484 return "asl";
8486 default:
8487 abort ();
8490 if (*amountp != -1)
8492 /* This is not 100% correct, but follows from the desire to merge
8493 multiplication by a power of 2 with the recognizer for a
8494 shift. >=32 is not a valid shift for "asl", so we must try to
8495 output a shift that produces the correct arithmetical result.
8496 Using lsr #32 is identical except for the fact that the carry bit
8497 is not set correctly if we set the flags; but we never use the
8498 carry bit from such an operation, so we can ignore that. */
8499 if (code == ROTATERT)
8500 /* Rotate is just modulo 32. */
8501 *amountp &= 31;
8502 else if (*amountp != (*amountp & 31))
8504 if (code == ASHIFT)
8505 mnem = "lsr";
8506 *amountp = 32;
8509 /* Shifts of 0 are no-ops. */
8510 if (*amountp == 0)
8511 return NULL;
8514 return mnem;
8517 /* Obtain the shift count from POWER, which must be a power of two. */
8519 static HOST_WIDE_INT
8520 int_log2 (HOST_WIDE_INT power)
8522 HOST_WIDE_INT shift = 0;
8524 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8526 if (shift > 31)
8527 abort ();
8528 shift++;
8531 return shift;
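/* Worked examples (assumed values, illustration only): int_log2 (1)
   is 0, int_log2 (8) is 3, and int_log2 ((HOST_WIDE_INT) 1 << 31) is
   31.  A POWER argument with no set bit in the low 32 bits runs the
   shift past 31 and trips the abort above.  */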
8534 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8535 because /bin/as is horribly restrictive. The judgement about
8536 whether or not each character is 'printable' (and can be output as
8537 is) or not (and must be printed with an octal escape) must be made
8538 with reference to the *host* character set -- the situation is
8539 similar to that discussed in the comments above pp_c_char in
8540 c-pretty-print.c. */
8542 #define MAX_ASCII_LEN 51
8544 void
8545 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8547 int i;
8548 int len_so_far = 0;
8550 fputs ("\t.ascii\t\"", stream);
8552 for (i = 0; i < len; i++)
8554 int c = p[i];
8556 if (len_so_far >= MAX_ASCII_LEN)
8558 fputs ("\"\n\t.ascii\t\"", stream);
8559 len_so_far = 0;
8562 if (ISPRINT (c))
8564 if (c == '\\' || c == '\"')
8566 putc ('\\', stream);
8567 len_so_far++;
8569 putc (c, stream);
8570 len_so_far++;
8572 else
8574 fprintf (stream, "\\%03o", c);
8575 len_so_far += 4;
8579 fputs ("\"\n", stream);
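/* Example of the resulting output (illustrative; the exact line
   splits depend on MAX_ASCII_LEN): the four input bytes H, i, '"',
   '\n' are emitted as

	.ascii	"Hi\"\012"

   The quote costs two characters of the line budget and the
   non-printable newline four (a backslash plus three octal digits).  */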
8582 /* Compute the register save mask for registers 0 through 12
8583 inclusive. This code is used by arm_compute_save_reg_mask. */
8585 static unsigned long
8586 arm_compute_save_reg0_reg12_mask (void)
8588 unsigned long func_type = arm_current_func_type ();
8589 unsigned long save_reg_mask = 0;
8590 unsigned int reg;
8592 if (IS_INTERRUPT (func_type))
8594 unsigned int max_reg;
8595 /* Interrupt functions must not corrupt any registers,
8596 even call clobbered ones. If this is a leaf function
8597 we can just examine the registers used by the RTL, but
8598 otherwise we have to assume that whatever function is
8599 called might clobber anything, and so we have to save
8600 all the call-clobbered registers as well. */
8601 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8602 /* FIQ handlers have registers r8 - r12 banked, so
8603 we only need to check r0 - r7. Normal ISRs only
8604 bank r14 and r15, so we must check up to r12.
8605 r13 is the stack pointer which is always preserved,
8606 so we do not need to consider it here. */
8607 max_reg = 7;
8608 else
8609 max_reg = 12;
8611 for (reg = 0; reg <= max_reg; reg++)
8612 if (regs_ever_live[reg]
8613 || (! current_function_is_leaf && call_used_regs [reg]))
8614 save_reg_mask |= (1 << reg);
8616 /* Also save the pic base register if necessary. */
8617 if (flag_pic
8618 && !TARGET_SINGLE_PIC_BASE
8619 && current_function_uses_pic_offset_table)
8620 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8622 else
8624 /* In the normal case we only need to save those registers
8625 which are call saved and which are used by this function. */
8626 for (reg = 0; reg <= 10; reg++)
8627 if (regs_ever_live[reg] && ! call_used_regs [reg])
8628 save_reg_mask |= (1 << reg);
8630 /* Handle the frame pointer as a special case. */
8631 if (! TARGET_APCS_FRAME
8632 && ! frame_pointer_needed
8633 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8634 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8635 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8637 /* If we aren't loading the PIC register,
8638 don't stack it even though it may be live. */
8639 if (flag_pic
8640 && !TARGET_SINGLE_PIC_BASE
8641 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8642 || current_function_uses_pic_offset_table))
8643 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8646 /* Save registers so the exception handler can modify them. */
8647 if (current_function_calls_eh_return)
8649 unsigned int i;
8651 for (i = 0; ; i++)
8653 reg = EH_RETURN_DATA_REGNO (i);
8654 if (reg == INVALID_REGNUM)
8655 break;
8656 save_reg_mask |= 1 << reg;
8660 return save_reg_mask;
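/* The parity tests below lean on bit_count; a minimal
   population-count sketch in the same spirit (the real helper may
   differ in detail):  */
#if 0 /* Illustrative sketch; not part of the compiler.  */
static unsigned
popcount_sketch (unsigned long value)
{
  unsigned count = 0;

  while (value)
    {
      value &= value - 1;		/* Clear the lowest set bit.  */
      count++;
    }
  return count;				/* E.g. 0x5009 yields 4.  */
}
#endif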
8663 /* Compute a bit mask of which registers need to be
8664 saved on the stack for the current function. */
8666 static unsigned long
8667 arm_compute_save_reg_mask (void)
8669 unsigned int save_reg_mask = 0;
8670 unsigned long func_type = arm_current_func_type ();
8672 if (IS_NAKED (func_type))
8673 /* This should never really happen. */
8674 return 0;
8676 /* If we are creating a stack frame, then we must save the frame pointer,
8677 IP (which will hold the old stack pointer), LR and the PC. */
8678 if (frame_pointer_needed)
8679 save_reg_mask |=
8680 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8681 | (1 << IP_REGNUM)
8682 | (1 << LR_REGNUM)
8683 | (1 << PC_REGNUM);
8685 /* Volatile functions do not return, so there
8686 is no need to save any other registers. */
8687 if (IS_VOLATILE (func_type))
8688 return save_reg_mask;
8690 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8692 /* Decide if we need to save the link register.
8693 Interrupt routines have their own banked link register,
8694 so they never need to save it.
8695 Otherwise, if we do not use the link register, we do not need to save
8696 it. If we are pushing other registers onto the stack, however, we
8697 can save an instruction in the epilogue by pushing the link register
8698 now and then popping it back into the PC. This incurs extra memory
8699 accesses though, so we only do it when optimizing for size, and only
8700 if we know that we will not need a fancy return sequence. */
8701 if (regs_ever_live [LR_REGNUM]
8702 || (save_reg_mask
8703 && optimize_size
8704 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8705 && !current_function_calls_eh_return))
8706 save_reg_mask |= 1 << LR_REGNUM;
8708 if (cfun->machine->lr_save_eliminated)
8709 save_reg_mask &= ~ (1 << LR_REGNUM);
8711 if (TARGET_REALLY_IWMMXT
8712 && ((bit_count (save_reg_mask)
8713 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8715 unsigned int reg;
8717 /* The total number of registers that are going to be pushed
8718 onto the stack is odd. We need to ensure that the stack
8719 is 64-bit aligned before we start to save iWMMXt registers,
8720 and also before we start to create locals. (A local variable
8721 might be a double or long long which we will load/store using
8722 an iWMMXt instruction). Therefore we need to push another
8723 ARM register, so that the stack will be 64-bit aligned. We
8724 try to avoid using the arg registers (r0 - r3) as they might be
8725 used to pass values in a tail call. */
8726 for (reg = 4; reg <= 12; reg++)
8727 if ((save_reg_mask & (1 << reg)) == 0)
8728 break;
8730 if (reg <= 12)
8731 save_reg_mask |= (1 << reg);
8732 else
8734 cfun->machine->sibcall_blocked = 1;
8735 save_reg_mask |= (1 << 3);
8739 return save_reg_mask;
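/* Worked example of the alignment fix-up above (assumed input,
   illustration only): with save_reg_mask covering {r4-r7, lr} and no
   pretend args, five words would be pushed -- an odd count, so the
   stack would lose its 64-bit alignment.  The loop picks the first
   free register at or above r4 (here r8) and adds it to the mask to
   restore an even count.  */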
8743 /* Compute a bit mask of which registers need to be
8744 saved on the stack for the current function. */
8745 static unsigned long
8746 thumb_compute_save_reg_mask (void)
8748 unsigned long mask;
8749 unsigned reg;
8751 mask = 0;
8752 for (reg = 0; reg < 12; reg ++)
8753 if (regs_ever_live[reg] && !call_used_regs[reg])
8754 mask |= 1 << reg;
8756 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8757 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8759 if (TARGET_SINGLE_PIC_BASE)
8760 mask &= ~(1 << arm_pic_register);
8762 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8763 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8764 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8766 /* LR will also be pushed if any lo regs are pushed. */
8767 if (mask & 0xff || thumb_force_lr_save ())
8768 mask |= (1 << LR_REGNUM);
8770 /* Make sure we have a low work register if we need one.
8771 We will need one if we are going to push a high register,
8772 but we are not currently intending to push a low register. */
8773 if ((mask & 0xff) == 0
8774 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8776 /* Use thumb_find_work_register to choose which register
8777 we will use. If the register is live then we will
8778 have to push it. Use LAST_LO_REGNUM as our fallback
8779 choice for the register to select. */
8780 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
8782 if (! call_used_regs[reg])
8783 mask |= 1 << reg;
8786 return mask;
8790 /* Return the number of bytes required to save VFP registers. */
8791 static int
8792 arm_get_vfp_saved_size (void)
8794 unsigned int regno;
8795 int count;
8796 int saved;
8798 saved = 0;
8799 /* Space for saved VFP registers. */
8800 if (TARGET_HARD_FLOAT && TARGET_VFP)
8802 count = 0;
8803 for (regno = FIRST_VFP_REGNUM;
8804 regno < LAST_VFP_REGNUM;
8805 regno += 2)
8807 if ((!regs_ever_live[regno] || call_used_regs[regno])
8808 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8810 if (count > 0)
8812 /* Workaround ARM10 VFPr1 bug. */
8813 if (count == 2 && !arm_arch6)
8814 count++;
8815 saved += count * 8 + 4;
8817 count = 0;
8819 else
8820 count++;
8822 if (count > 0)
8824 if (count == 2 && !arm_arch6)
8825 count++;
8826 saved += count * 8 + 4;
8829 return saved;
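/* Worked example (assumed register usage, illustration only): one
   contiguous run of two call-saved D registers costs 2 * 8 + 4 = 20
   bytes -- two doublewords plus the extra word stored by fstmx.  On a
   pre-v6 core the ARM10 VFPr1 workaround widens such a run to three
   registers, giving 3 * 8 + 4 = 28 bytes instead.  */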
8833 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8834 everything bar the final return instruction. */
8835 const char *
8836 output_return_instruction (rtx operand, int really_return, int reverse)
8838 char conditional[10];
8839 char instr[100];
8840 unsigned reg;
8841 unsigned long live_regs_mask;
8842 unsigned long func_type;
8843 arm_stack_offsets *offsets;
8845 func_type = arm_current_func_type ();
8847 if (IS_NAKED (func_type))
8848 return "";
8850 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8852 /* If this function was declared non-returning, and we have
8853 found a tail call, then we have to trust that the called
8854 function won't return. */
8855 if (really_return)
8857 rtx ops[2];
8859 /* Otherwise, trap an attempted return by aborting. */
8860 ops[0] = operand;
8861 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8862 : "abort");
8863 assemble_external_libcall (ops[1]);
8864 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8867 return "";
8870 if (current_function_calls_alloca && !really_return)
8871 abort ();
8873 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8875 return_used_this_function = 1;
8877 live_regs_mask = arm_compute_save_reg_mask ();
8879 if (live_regs_mask)
8881 const char * return_reg;
8883 /* If we do not have any special requirements for function exit
8884 (e.g. interworking, or ISR) then we can load the return address
8885 directly into the PC. Otherwise we must load it into LR. */
8886 if (really_return
8887 && ! TARGET_INTERWORK)
8888 return_reg = reg_names[PC_REGNUM];
8889 else
8890 return_reg = reg_names[LR_REGNUM];
8892 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8894 /* There are three possible reasons for the IP register
8895 being saved: 1) a stack frame was created, in which case
8896 IP contains the old stack pointer; 2) an ISR routine
8897 corrupted it; or 3) it was saved to align the stack on
8898 iWMMXt. In case 1, restore IP into SP; otherwise just
8899 restore IP. */
8900 if (frame_pointer_needed)
8902 live_regs_mask &= ~ (1 << IP_REGNUM);
8903 live_regs_mask |= (1 << SP_REGNUM);
8905 else
8907 if (! IS_INTERRUPT (func_type)
8908 && ! TARGET_REALLY_IWMMXT)
8909 abort ();
8913 /* On some ARM architectures it is faster to use LDR rather than
8914 LDM to load a single register. On other architectures, the
8915 cost is the same. In 26 bit mode, or for exception handlers,
8916 we have to use LDM to load the PC so that the CPSR is also
8917 restored. */
8918 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8919 if (live_regs_mask == (1U << reg))
8920 break;
8922 if (reg <= LAST_ARM_REGNUM
8923 && (reg != LR_REGNUM
8924 || ! really_return
8925 || ! IS_INTERRUPT (func_type)))
8927 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8928 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8930 else
8932 char *p;
8933 int first = 1;
8935 /* Generate the load multiple instruction to restore the
8936 registers. Note we can get here, even if
8937 frame_pointer_needed is true, but only if sp already
8938 points to the base of the saved core registers. */
8939 if (live_regs_mask & (1 << SP_REGNUM))
8941 unsigned HOST_WIDE_INT stack_adjust;
8943 offsets = arm_get_frame_offsets ();
8944 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
8945 if (stack_adjust != 0 && stack_adjust != 4)
8946 abort ();
8948 if (stack_adjust && arm_arch5)
8949 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8950 else
8952 /* If we can't use ldmib (SA110 bug),
8953 then try to pop r3 instead. */
8954 if (stack_adjust)
8955 live_regs_mask |= 1 << 3;
8956 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8959 else
8960 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8962 p = instr + strlen (instr);
8964 for (reg = 0; reg <= SP_REGNUM; reg++)
8965 if (live_regs_mask & (1 << reg))
8967 int l = strlen (reg_names[reg]);
8969 if (first)
8970 first = 0;
8971 else
8973 memcpy (p, ", ", 2);
8974 p += 2;
8977 memcpy (p, "%|", 2);
8978 memcpy (p + 2, reg_names[reg], l);
8979 p += l + 2;
8982 if (live_regs_mask & (1 << LR_REGNUM))
8984 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8985 /* If returning from an interrupt, restore the CPSR. */
8986 if (IS_INTERRUPT (func_type))
8987 strcat (p, "^");
8989 else
8990 strcpy (p, "}");
8993 output_asm_insn (instr, & operand);
8995 /* See if we need to generate an extra instruction to
8996 perform the actual function return. */
8997 if (really_return
8998 && func_type != ARM_FT_INTERWORKED
8999 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9001 /* The return has already been handled
9002 by loading the LR into the PC. */
9003 really_return = 0;
9007 if (really_return)
9009 switch ((int) ARM_FUNC_TYPE (func_type))
9011 case ARM_FT_ISR:
9012 case ARM_FT_FIQ:
9013 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9014 break;
9016 case ARM_FT_INTERWORKED:
9017 sprintf (instr, "bx%s\t%%|lr", conditional);
9018 break;
9020 case ARM_FT_EXCEPTION:
9021 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9022 break;
9024 default:
9025 /* Use bx if it's available. */
9026 if (arm_arch5 || arm_arch4t)
9027 sprintf (instr, "bx%s\t%%|lr", conditional);
9028 else
9029 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9030 break;
9033 output_asm_insn (instr, & operand);
9036 return "";
9039 /* Write the function name into the code section, directly preceding
9040 the function prologue.
9042 Code will be output similar to this:
9043 t0
9044 .ascii "arm_poke_function_name", 0
9045 .align
9046 t1
9047 .word 0xff000000 + (t1 - t0)
9048 arm_poke_function_name
9049 mov ip, sp
9050 stmfd sp!, {fp, ip, lr, pc}
9051 sub fp, ip, #4
9053 When performing a stack backtrace, code can inspect the value
9054 of 'pc' stored at 'fp' + 0. If the trace function then looks
9055 at location pc - 12 and the top 8 bits are set, then we know
9056 that there is a function name embedded immediately preceding this
9057 location, whose length is ((pc[-3]) & 0x00ffffff).
9059 We assume that pc is declared as a pointer to an unsigned long.
9061 It is of no benefit to output the function name if we are assembling
9062 a leaf function. These function types will not contain a stack
9063 backtrace structure, so it is not possible to determine the
9064 function name. */
9065 void
9066 arm_poke_function_name (FILE *stream, const char *name)
9068 unsigned long alignlength;
9069 unsigned long length;
9070 rtx x;
9072 length = strlen (name) + 1;
9073 alignlength = ROUND_UP_WORD (length);
9075 ASM_OUTPUT_ASCII (stream, name, length);
9076 ASM_OUTPUT_ALIGN (stream, 2);
9077 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9078 assemble_aligned_integer (UNITS_PER_WORD, x);
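/* A hedged sketch of the consumer side described above (hypothetical
   code, assuming the saved 'pc' value behaves as an unsigned long
   pointer and the marker layout shown in the comment):  */
#if 0 /* Illustrative sketch; not part of the compiler.  */
static const char *
embedded_function_name (const unsigned long *pc)
{
  unsigned long marker = pc[-3];

  if ((marker & 0xff000000UL) != 0xff000000UL)
    return 0;				/* No name was poked here.  */
  /* The low 24 bits hold the padded length of the name, and the
     string ends immediately before the marker word.  */
  return (const char *) (pc - 3) - (marker & 0x00ffffffUL);
}
#endif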
9081 /* Place some comments into the assembler stream
9082 describing the current function. */
9083 static void
9084 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9086 unsigned long func_type;
9088 if (!TARGET_ARM)
9090 thumb_output_function_prologue (f, frame_size);
9091 return;
9094 /* Sanity check. */
9095 if (arm_ccfsm_state || arm_target_insn)
9096 abort ();
9098 func_type = arm_current_func_type ();
9100 switch ((int) ARM_FUNC_TYPE (func_type))
9102 default:
9103 case ARM_FT_NORMAL:
9104 break;
9105 case ARM_FT_INTERWORKED:
9106 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9107 break;
9108 case ARM_FT_ISR:
9109 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9110 break;
9111 case ARM_FT_FIQ:
9112 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9113 break;
9114 case ARM_FT_EXCEPTION:
9115 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9116 break;
9119 if (IS_NAKED (func_type))
9120 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9122 if (IS_VOLATILE (func_type))
9123 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9125 if (IS_NESTED (func_type))
9126 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9128 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9129 current_function_args_size,
9130 current_function_pretend_args_size, frame_size);
9132 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9133 frame_pointer_needed,
9134 cfun->machine->uses_anonymous_args);
9136 if (cfun->machine->lr_save_eliminated)
9137 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9139 if (current_function_calls_eh_return)
9140 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9142 #ifdef AOF_ASSEMBLER
9143 if (flag_pic)
9144 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9145 #endif
9147 return_used_this_function = 0;
9150 const char *
9151 arm_output_epilogue (rtx sibling)
9153 int reg;
9154 unsigned long saved_regs_mask;
9155 unsigned long func_type;
9156 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9157 frame that is $fp + 4 for a non-variadic function. */
9158 int floats_offset = 0;
9159 rtx operands[3];
9160 FILE * f = asm_out_file;
9161 unsigned int lrm_count = 0;
9162 int really_return = (sibling == NULL);
9163 int start_reg;
9164 arm_stack_offsets *offsets;
9166 /* If we have already generated the return instruction
9167 then it is futile to generate anything else. */
9168 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9169 return "";
9171 func_type = arm_current_func_type ();
9173 if (IS_NAKED (func_type))
9174 /* Naked functions don't have epilogues. */
9175 return "";
9177 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9179 rtx op;
9181 /* A volatile function should never return. Call abort. */
9182 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9183 assemble_external_libcall (op);
9184 output_asm_insn ("bl\t%a0", &op);
9186 return "";
9189 if (current_function_calls_eh_return
9190 && ! really_return)
9191 /* If we are throwing an exception, then we really must
9192 be doing a return, so we can't tail-call. */
9193 abort ();
9195 offsets = arm_get_frame_offsets ();
9196 saved_regs_mask = arm_compute_save_reg_mask ();
9198 if (TARGET_IWMMXT)
9199 lrm_count = bit_count (saved_regs_mask);
9201 floats_offset = offsets->saved_args;
9202 /* Compute how far away the floats will be. */
9203 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9204 if (saved_regs_mask & (1 << reg))
9205 floats_offset += 4;
9207 if (frame_pointer_needed)
9209 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9210 int vfp_offset = offsets->frame;
9212 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9214 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9215 if (regs_ever_live[reg] && !call_used_regs[reg])
9217 floats_offset += 12;
9218 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9219 reg, FP_REGNUM, floats_offset - vfp_offset);
9222 else
9224 start_reg = LAST_FPA_REGNUM;
9226 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9228 if (regs_ever_live[reg] && !call_used_regs[reg])
9230 floats_offset += 12;
9232 /* We can't unstack more than four registers at once. */
9233 if (start_reg - reg == 3)
9235 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9236 reg, FP_REGNUM, floats_offset - vfp_offset);
9237 start_reg = reg - 1;
9240 else
9242 if (reg != start_reg)
9243 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9244 reg + 1, start_reg - reg,
9245 FP_REGNUM, floats_offset - vfp_offset);
9246 start_reg = reg - 1;
9250 /* Just in case the last register checked also needs unstacking. */
9251 if (reg != start_reg)
9252 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9253 reg + 1, start_reg - reg,
9254 FP_REGNUM, floats_offset - vfp_offset);
9257 if (TARGET_HARD_FLOAT && TARGET_VFP)
9259 int saved_size;
9261 /* The fldmx insn does not have base+offset addressing modes,
9262 so we use IP to hold the address. */
9263 saved_size = arm_get_vfp_saved_size ();
9265 if (saved_size > 0)
9267 floats_offset += saved_size;
9268 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9269 FP_REGNUM, floats_offset - vfp_offset);
9271 start_reg = FIRST_VFP_REGNUM;
9272 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9274 if ((!regs_ever_live[reg] || call_used_regs[reg])
9275 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9277 if (start_reg != reg)
9278 arm_output_fldmx (f, IP_REGNUM,
9279 (start_reg - FIRST_VFP_REGNUM) / 2,
9280 (reg - start_reg) / 2);
9281 start_reg = reg + 2;
9284 if (start_reg != reg)
9285 arm_output_fldmx (f, IP_REGNUM,
9286 (start_reg - FIRST_VFP_REGNUM) / 2,
9287 (reg - start_reg) / 2);
9290 if (TARGET_IWMMXT)
9292 /* The frame pointer is guaranteed to be non-double-word aligned.
9293 This is because it is set to (old_stack_pointer - 4) and the
9294 old_stack_pointer was double word aligned. Thus the offset to
9295 the iWMMXt registers to be loaded must also be non-double-word
9296 sized, so that the resultant address *is* double-word aligned.
9297 We can ignore floats_offset since that was already included in
9298 the live_regs_mask. */
9299 lrm_count += (lrm_count % 2 ? 2 : 1);
9301 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9302 if (regs_ever_live[reg] && !call_used_regs[reg])
9304 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9305 reg, FP_REGNUM, lrm_count * 4);
9306 lrm_count += 2;
9310 /* saved_regs_mask should contain the IP, which at the time of stack
9311 frame generation actually contains the old stack pointer. So a
9312 quick way to unwind the stack is just to pop the IP register directly
9313 into the stack pointer. */
9314 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9315 abort ();
9316 saved_regs_mask &= ~ (1 << IP_REGNUM);
9317 saved_regs_mask |= (1 << SP_REGNUM);
9319 /* There are two registers left in saved_regs_mask - LR and PC. We
9320 only need to restore the LR register (the return address), but to
9321 save time we can load it directly into the PC, unless we need a
9322 special function exit sequence, or we are not really returning. */
9323 if (really_return
9324 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9325 && !current_function_calls_eh_return)
9326 /* Delete the LR from the register mask, so that the LR on
9327 the stack is loaded into the PC in the register mask. */
9328 saved_regs_mask &= ~ (1 << LR_REGNUM);
9329 else
9330 saved_regs_mask &= ~ (1 << PC_REGNUM);
9332 /* We must use SP as the base register, because SP is one of the
9333 registers being restored. If an interrupt or page fault
9334 happens in the ldm instruction, the SP might or might not
9335 have been restored. That would be bad, as then SP will no
9336 longer indicate the safe area of stack, and we can get stack
9337 corruption. Using SP as the base register means that it will
9338 be reset correctly to the original value, should an interrupt
9339 occur. If the stack pointer already points at the right
9340 place, then omit the subtraction. */
9341 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9342 || current_function_calls_alloca)
9343 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9344 4 * bit_count (saved_regs_mask));
9345 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9347 if (IS_INTERRUPT (func_type))
9348 /* Interrupt handlers will have pushed the
9349 IP onto the stack, so restore it now. */
9350 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9352 else
9354 /* Restore stack pointer if necessary. */
9355 if (offsets->outgoing_args != offsets->saved_regs)
9357 operands[0] = operands[1] = stack_pointer_rtx;
9358 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9359 output_add_immediate (operands);
9362 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9364 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9365 if (regs_ever_live[reg] && !call_used_regs[reg])
9366 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9367 reg, SP_REGNUM);
9369 else
9371 start_reg = FIRST_FPA_REGNUM;
9373 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9375 if (regs_ever_live[reg] && !call_used_regs[reg])
9377 if (reg - start_reg == 3)
9379 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9380 start_reg, SP_REGNUM);
9381 start_reg = reg + 1;
9384 else
9386 if (reg != start_reg)
9387 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9388 start_reg, reg - start_reg,
9389 SP_REGNUM);
9391 start_reg = reg + 1;
9395 /* Just in case the last register checked also needs unstacking. */
9396 if (reg != start_reg)
9397 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9398 start_reg, reg - start_reg, SP_REGNUM);
9401 if (TARGET_HARD_FLOAT && TARGET_VFP)
9403 start_reg = FIRST_VFP_REGNUM;
9404 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9406 if ((!regs_ever_live[reg] || call_used_regs[reg])
9407 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9409 if (start_reg != reg)
9410 arm_output_fldmx (f, SP_REGNUM,
9411 (start_reg - FIRST_VFP_REGNUM) / 2,
9412 (reg - start_reg) / 2);
9413 start_reg = reg + 2;
9416 if (start_reg != reg)
9417 arm_output_fldmx (f, SP_REGNUM,
9418 (start_reg - FIRST_VFP_REGNUM) / 2,
9419 (reg - start_reg) / 2);
9421 if (TARGET_IWMMXT)
9422 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9423 if (regs_ever_live[reg] && !call_used_regs[reg])
9424 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9426 /* If we can, restore the LR into the PC. */
9427 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9428 && really_return
9429 && current_function_pretend_args_size == 0
9430 && saved_regs_mask & (1 << LR_REGNUM)
9431 && !current_function_calls_eh_return)
9433 saved_regs_mask &= ~ (1 << LR_REGNUM);
9434 saved_regs_mask |= (1 << PC_REGNUM);
9437 /* Load the registers off the stack. If we only have one register
9438 to load use the LDR instruction - it is faster. */
9439 if (saved_regs_mask == (1 << LR_REGNUM))
9441 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9443 else if (saved_regs_mask)
9445 if (saved_regs_mask & (1 << SP_REGNUM))
9446 /* Note - write back to the stack register is not enabled
9447 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9448 in the list of registers and if we add writeback the
9449 instruction becomes UNPREDICTABLE. */
9450 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9451 else
9452 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9455 if (current_function_pretend_args_size)
9457 /* Unwind the pre-pushed regs. */
9458 operands[0] = operands[1] = stack_pointer_rtx;
9459 operands[2] = GEN_INT (current_function_pretend_args_size);
9460 output_add_immediate (operands);
9464 /* We may have already restored PC directly from the stack. */
9465 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9466 return "";
9468 /* Stack adjustment for exception handler. */
9469 if (current_function_calls_eh_return)
9470 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9471 ARM_EH_STACKADJ_REGNUM);
9473 /* Generate the return instruction. */
9474 switch ((int) ARM_FUNC_TYPE (func_type))
9476 case ARM_FT_ISR:
9477 case ARM_FT_FIQ:
9478 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9479 break;
9481 case ARM_FT_EXCEPTION:
9482 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9483 break;
9485 case ARM_FT_INTERWORKED:
9486 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9487 break;
9489 default:
9490 if (arm_arch5 || arm_arch4t)
9491 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9492 else
9493 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9494 break;
9497 return "";
9500 static void
9501 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9502 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9504 arm_stack_offsets *offsets;
9506 if (TARGET_THUMB)
9508 int regno;
9510 /* Emit any call-via-reg trampolines that are needed for v4t support
9511 of call_reg and call_value_reg type insns. */
9512 for (regno = 0; regno < LR_REGNUM; regno++)
9514 rtx label = cfun->machine->call_via[regno];
9516 if (label != NULL)
9518 function_section (current_function_decl);
9519 targetm.asm_out.internal_label (asm_out_file, "L",
9520 CODE_LABEL_NUMBER (label));
9521 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9525 /* ??? Probably not safe to set this here, since it assumes that a
9526 function will be emitted as assembly immediately after we generate
9527 RTL for it. This does not happen for inline functions. */
9528 return_used_this_function = 0;
9530 else
9532 /* We need to take into account any stack-frame rounding. */
9533 offsets = arm_get_frame_offsets ();
9535 if (use_return_insn (FALSE, NULL)
9536 && return_used_this_function
9537 && offsets->saved_regs != offsets->outgoing_args
9538 && !frame_pointer_needed)
9539 abort ();
9541 /* Reset the ARM-specific per-function variables. */
9542 after_arm_reorg = 0;
9546 /* Generate and emit an insn that we will recognize as a push_multi.
9547 Unfortunately, since this insn does not reflect very well the actual
9548 semantics of the operation, we need to annotate the insn for the benefit
9549 of DWARF2 frame unwind information. */
9550 static rtx
9551 emit_multi_reg_push (unsigned long mask)
9553 int num_regs = 0;
9554 int num_dwarf_regs;
9555 int i, j;
9556 rtx par;
9557 rtx dwarf;
9558 int dwarf_par_index;
9559 rtx tmp, reg;
9561 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9562 if (mask & (1 << i))
9563 num_regs++;
9565 if (num_regs == 0 || num_regs > 16)
9566 abort ();
9568 /* We don't record the PC in the dwarf frame information. */
9569 num_dwarf_regs = num_regs;
9570 if (mask & (1 << PC_REGNUM))
9571 num_dwarf_regs--;
9573 /* For the body of the insn we are going to generate an UNSPEC in
9574 parallel with several USEs. This allows the insn to be recognized
9575 by the push_multi pattern in the arm.md file. The insn looks
9576 something like this:
9578 (parallel [
9579 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9580 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9581 (use (reg:SI 11 fp))
9582 (use (reg:SI 12 ip))
9583 (use (reg:SI 14 lr))
9584 (use (reg:SI 15 pc))
9585 ])
9587 For the frame note however, we try to be more explicit and actually
9588 show each register being stored into the stack frame, plus a (single)
9589 decrement of the stack pointer. We do it this way in order to be
9590 friendly to the stack unwinding code, which only wants to see a single
9591 stack decrement per instruction. The RTL we generate for the note looks
9592 something like this:
9594 (sequence [
9595 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9596 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9597 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9598 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9599 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9600 ])
9602 This sequence is used both by the code to support stack unwinding for
9603 exception handlers and the code to generate dwarf2 frame debugging. */
9605 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9606 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9607 dwarf_par_index = 1;
9609 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9611 if (mask & (1 << i))
9613 reg = gen_rtx_REG (SImode, i);
9615 XVECEXP (par, 0, 0)
9616 = gen_rtx_SET (VOIDmode,
9617 gen_rtx_MEM (BLKmode,
9618 gen_rtx_PRE_DEC (BLKmode,
9619 stack_pointer_rtx)),
9620 gen_rtx_UNSPEC (BLKmode,
9621 gen_rtvec (1, reg),
9622 UNSPEC_PUSH_MULT));
9624 if (i != PC_REGNUM)
9626 tmp = gen_rtx_SET (VOIDmode,
9627 gen_rtx_MEM (SImode, stack_pointer_rtx),
9628 reg);
9629 RTX_FRAME_RELATED_P (tmp) = 1;
9630 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9631 dwarf_par_index++;
9634 break;
9638 for (j = 1, i++; j < num_regs; i++)
9640 if (mask & (1 << i))
9642 reg = gen_rtx_REG (SImode, i);
9644 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9646 if (i != PC_REGNUM)
9648 tmp = gen_rtx_SET (VOIDmode,
9649 gen_rtx_MEM (SImode,
9650 plus_constant (stack_pointer_rtx,
9651 4 * j)),
9652 reg);
9653 RTX_FRAME_RELATED_P (tmp) = 1;
9654 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9657 j++;
9661 par = emit_insn (par);
9663 tmp = gen_rtx_SET (SImode,
9664 stack_pointer_rtx,
9665 gen_rtx_PLUS (SImode,
9666 stack_pointer_rtx,
9667 GEN_INT (-4 * num_regs)));
9668 RTX_FRAME_RELATED_P (tmp) = 1;
9669 XVECEXP (dwarf, 0, 0) = tmp;
9671 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9672 REG_NOTES (par));
9673 return par;
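/* Worked example (illustrative): pushing {r4, fp, ip, lr, pc} gives
   num_regs == 5 but num_dwarf_regs == 4, since the PC slot is not
   described.  The attached note then shows a single SP adjustment of
   -20 followed by four stores at sp, sp+4, sp+8 and sp+12, matching
   the sample RTL in the comment above.  */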
9676 static rtx
9677 emit_sfm (int base_reg, int count)
9679 rtx par;
9680 rtx dwarf;
9681 rtx tmp, reg;
9682 int i;
9684 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9685 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9687 reg = gen_rtx_REG (XFmode, base_reg++);
9689 XVECEXP (par, 0, 0)
9690 = gen_rtx_SET (VOIDmode,
9691 gen_rtx_MEM (BLKmode,
9692 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9693 gen_rtx_UNSPEC (BLKmode,
9694 gen_rtvec (1, reg),
9695 UNSPEC_PUSH_MULT));
9696 tmp = gen_rtx_SET (VOIDmode,
9697 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9698 RTX_FRAME_RELATED_P (tmp) = 1;
9699 XVECEXP (dwarf, 0, 1) = tmp;
9701 for (i = 1; i < count; i++)
9703 reg = gen_rtx_REG (XFmode, base_reg++);
9704 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9706 tmp = gen_rtx_SET (VOIDmode,
9707 gen_rtx_MEM (XFmode,
9708 plus_constant (stack_pointer_rtx,
9709 i * 12)),
9710 reg);
9711 RTX_FRAME_RELATED_P (tmp) = 1;
9712 XVECEXP (dwarf, 0, i + 1) = tmp;
9715 tmp = gen_rtx_SET (VOIDmode,
9716 stack_pointer_rtx,
9717 gen_rtx_PLUS (SImode,
9718 stack_pointer_rtx,
9719 GEN_INT (-12 * count)));
9720 RTX_FRAME_RELATED_P (tmp) = 1;
9721 XVECEXP (dwarf, 0, 0) = tmp;
9723 par = emit_insn (par);
9724 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9725 REG_NOTES (par));
9726 return par;
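/* Worked example (illustrative): a call emit_sfm (reg, 3) describes,
   in its dwarf note, three XFmode stores at sp, sp + 12 and sp + 24,
   plus a single SP adjustment of -12 * 3 = -36 bytes.  */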
9730 /* Return true if the current function needs to save/restore LR. */
9732 static bool
9733 thumb_force_lr_save (void)
9735 return !cfun->machine->lr_save_eliminated
9736 && (!leaf_function_p ()
9737 || thumb_far_jump_used_p ()
9738 || regs_ever_live [LR_REGNUM]);
9742 /* Compute the distance from register FROM to register TO.
9743 These can be the arg pointer (26), the soft frame pointer (25),
9744 the stack pointer (13) or the hard frame pointer (11).
9745 In thumb mode r7 is used as the soft frame pointer, if needed.
9746 Typical stack layout looks like this:
9748 old stack pointer -> | |
9749 ----
9750 | | \
9751 | | saved arguments for
9752 | | vararg functions
9753 | | /
9755 hard FP & arg pointer -> | | \
9756 | | stack
9757 | | frame
9758 | | /
9760 | | \
9761 | | call saved
9762 | | registers
9763 soft frame pointer -> | | /
9765 | | \
9766 | | local
9767 | | variables
9768 | | /
9770 | | \
9771 | | outgoing
9772 | | arguments
9773 current stack pointer -> | | /
9776 For a given function some or all of these stack components
9777 may not be needed, giving rise to the possibility of
9778 eliminating some of the registers.
9780 The values returned by this function must reflect the behavior
9781 of arm_expand_prologue() and arm_compute_save_reg_mask().
9783 The sign of the number returned reflects the direction of stack
9784 growth, so the values are positive for all eliminations except
9785 from the soft frame pointer to the hard frame pointer.
9787 SFP may point just inside the local variables block to ensure correct
9788 alignment. */
9791 /* Calculate stack offsets. These are used to calculate register elimination
9792 offsets and in prologue/epilogue code. */
9794 static arm_stack_offsets *
9795 arm_get_frame_offsets (void)
9797 struct arm_stack_offsets *offsets;
9798 unsigned long func_type;
9799 int leaf;
9800 int saved;
9801 HOST_WIDE_INT frame_size;
9803 offsets = &cfun->machine->stack_offsets;
9805 /* We need to know if we are a leaf function. Unfortunately, it
9806 is possible to be called after start_sequence has been called,
9807 which causes get_insns to return the insns for the sequence,
9808 not the function, which will cause leaf_function_p to return
9809 the incorrect result.  We only need
9811 to know about leaf functions once reload has completed, and the
9812 frame size cannot be changed after that time, so we can safely
9813 use the cached value. */
9815 if (reload_completed)
9816 return offsets;
9818 /* Initially this is the size of the local variables. It will be translated
9819 into an offset once we have determined the size of preceding data. */
9820 frame_size = ROUND_UP_WORD (get_frame_size ());
9822 leaf = leaf_function_p ();
9824 /* Space for variadic functions. */
9825 offsets->saved_args = current_function_pretend_args_size;
9827 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9829 if (TARGET_ARM)
9831 unsigned int regno;
9833 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9835 /* We know that SP will be doubleword aligned on entry, and we must
9836 preserve that condition at any subroutine call. We also require the
9837 soft frame pointer to be doubleword aligned. */
9839 if (TARGET_REALLY_IWMMXT)
9841 /* Check for the call-saved iWMMXt registers. */
9842 for (regno = FIRST_IWMMXT_REGNUM;
9843 regno <= LAST_IWMMXT_REGNUM;
9844 regno++)
9845 if (regs_ever_live [regno] && ! call_used_regs [regno])
9846 saved += 8;
9849 func_type = arm_current_func_type ();
9850 if (! IS_VOLATILE (func_type))
9852 /* Space for saved FPA registers. */
9853 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9854 if (regs_ever_live[regno] && ! call_used_regs[regno])
9855 saved += 12;
9857 /* Space for saved VFP registers. */
9858 if (TARGET_HARD_FLOAT && TARGET_VFP)
9859 saved += arm_get_vfp_saved_size ();
9862 else /* TARGET_THUMB */
9864 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9865 if (TARGET_BACKTRACE)
9866 saved += 16;
9869 /* Saved registers include the stack frame. */
9870 offsets->saved_regs = offsets->saved_args + saved;
9871 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9872 /* A leaf function does not need any stack alignment if it has nothing
9873 on the stack. */
9874 if (leaf && frame_size == 0)
9876 offsets->outgoing_args = offsets->soft_frame;
9877 return offsets;
9880 /* Ensure SFP has the correct alignment. */
9881 if (ARM_DOUBLEWORD_ALIGN
9882 && (offsets->soft_frame & 7))
9883 offsets->soft_frame += 4;
9885 offsets->outgoing_args = offsets->soft_frame + frame_size
9886 + current_function_outgoing_args_size;
9888 if (ARM_DOUBLEWORD_ALIGN)
9890 /* Ensure SP remains doubleword aligned. */
9891 if (offsets->outgoing_args & 7)
9892 offsets->outgoing_args += 4;
9893 if (offsets->outgoing_args & 7)
9894 abort ();
9897 return offsets;
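/* Worked example (all inputs assumed, illustration only): an ARM-mode
   function with no pretend args, no frame pointer, four saved core
   registers, 20 bytes of locals and 8 bytes of outgoing arguments,
   with CALLER_INTERWORKING_SLOT_SIZE == 0 and doubleword alignment in
   force, gets saved_args = 0, frame = 0, saved_regs = 16, soft_frame
   = 16 and outgoing_args = 16 + 20 + 8 = 44, rounded up to 48 to keep
   SP doubleword aligned.  */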
9901 /* Calculate the relative offsets for the different stack pointers. Positive
9902 offsets are in the direction of stack growth. */
9904 HOST_WIDE_INT
9905 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9907 arm_stack_offsets *offsets;
9909 offsets = arm_get_frame_offsets ();
9911 /* OK, now we have enough information to compute the distances.
9912 There must be an entry in these switch tables for each pair
9913 of registers in ELIMINABLE_REGS, even if some of the entries
9914 seem to be redundant or useless. */
9915 switch (from)
9917 case ARG_POINTER_REGNUM:
9918 switch (to)
9920 case THUMB_HARD_FRAME_POINTER_REGNUM:
9921 return 0;
9923 case FRAME_POINTER_REGNUM:
9924 /* This is the reverse of the soft frame pointer
9925 to hard frame pointer elimination below. */
9926 return offsets->soft_frame - offsets->saved_args;
9928 case ARM_HARD_FRAME_POINTER_REGNUM:
9929 /* If there is no stack frame then the hard
9930 frame pointer and the arg pointer coincide. */
9931 if (offsets->frame == offsets->saved_regs)
9932 return 0;
9933 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9934 return (frame_pointer_needed
9935 && cfun->static_chain_decl != NULL
9936 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9938 case STACK_POINTER_REGNUM:
9939 /* If nothing has been pushed on the stack at all
9940 then this will return -4. This *is* correct! */
9941 return offsets->outgoing_args - (offsets->saved_args + 4);
9943 default:
9944 abort ();
9946 break;
9948 case FRAME_POINTER_REGNUM:
9949 switch (to)
9951 case THUMB_HARD_FRAME_POINTER_REGNUM:
9952 return 0;
9954 case ARM_HARD_FRAME_POINTER_REGNUM:
9955 /* The hard frame pointer points to the top entry in the
9956 stack frame. The soft frame pointer points to the bottom entry
9957 in the stack frame. If there is no stack frame at all,
9958 then they are identical. */
9960 return offsets->frame - offsets->soft_frame;
9962 case STACK_POINTER_REGNUM:
9963 return offsets->outgoing_args - offsets->soft_frame;
9965 default:
9966 abort ();
9968 break;
9970 default:
9971 /* You cannot eliminate from the stack pointer.
9972 In theory you could eliminate from the hard frame
9973 pointer to the stack pointer, but this will never
9974 happen, since if a stack frame is not needed the
9975 hard frame pointer will never be used. */
9976 abort ();
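/* Continuing the worked example above (same assumed offsets): the
   ARG_POINTER to STACK_POINTER elimination returns 48 - (0 + 4) = 44
   and FRAME_POINTER to STACK_POINTER returns 48 - 16 = 32, both
   positive because the stack grows away from the argument area.  */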
9981 /* Generate the prologue instructions for entry into an ARM function. */
9982 void
9983 arm_expand_prologue (void)
9985 int reg;
9986 rtx amount;
9987 rtx insn;
9988 rtx ip_rtx;
9989 unsigned long live_regs_mask;
9990 unsigned long func_type;
9991 int fp_offset = 0;
9992 int saved_pretend_args = 0;
9993 int saved_regs = 0;
9994 unsigned HOST_WIDE_INT args_to_push;
9995 arm_stack_offsets *offsets;
9997 func_type = arm_current_func_type ();
9999 /* Naked functions don't have prologues. */
10000 if (IS_NAKED (func_type))
10001 return;
10003 /* Make a copy of current_function_pretend_args_size, as we may need to modify it locally. */
10004 args_to_push = current_function_pretend_args_size;
10006 /* Compute which registers we will have to save onto the stack. */
10007 live_regs_mask = arm_compute_save_reg_mask ();
10009 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10011 if (frame_pointer_needed)
10013 if (IS_INTERRUPT (func_type))
10015 /* Interrupt functions must not corrupt any registers.
10016 Creating a frame pointer, however, corrupts the IP
10017 register, so we must push it first. */
10018 insn = emit_multi_reg_push (1 << IP_REGNUM);
10020 /* Do not set RTX_FRAME_RELATED_P on this insn.
10021 The dwarf stack unwinding code only wants to see one
10022 stack decrement per function, and this is not it. If
10023 this instruction is labeled as being part of the frame
10024 creation sequence then dwarf2out_frame_debug_expr will
10025 abort when it encounters the assignment of IP to FP
10026 later on, since the use of SP here establishes SP as
10027 the CFA register and not IP.
10029 Anyway this instruction is not really part of the stack
10030 frame creation although it is part of the prologue. */
10032 else if (IS_NESTED (func_type))
10034 /* The static chain register is the same as the IP register
10035 used as a scratch register during stack frame creation.
10036 To get around this we need to find somewhere to store IP
10037 whilst the frame is being created. We try the following
10038 places in order:
10040 1. The last argument register.
10041 2. A slot on the stack above the frame. (This only
10042 works if the function is not a varargs function).
10043 3. Register r3, after pushing the argument registers
10044 onto the stack.
10046 Note - we only need to tell the dwarf2 backend about the SP
10047 adjustment in the second variant; the static chain register
10048 doesn't need to be unwound, as it doesn't contain a value
10049 inherited from the caller. */
10051 if (regs_ever_live[3] == 0)
10053 insn = gen_rtx_REG (SImode, 3);
10054 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10055 insn = emit_insn (insn);
10057 else if (args_to_push == 0)
10059 rtx dwarf;
10060 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10061 insn = gen_rtx_MEM (SImode, insn);
10062 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10063 insn = emit_insn (insn);
10065 fp_offset = 4;
10067 /* Just tell the dwarf backend that we adjusted SP. */
10068 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10069 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10070 GEN_INT (-fp_offset)));
10071 RTX_FRAME_RELATED_P (insn) = 1;
10072 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10073 dwarf, REG_NOTES (insn));
10075 else
10077 /* Store the args on the stack. */
10078 if (cfun->machine->uses_anonymous_args)
10079 insn = emit_multi_reg_push
10080 ((0xf0 >> (args_to_push / 4)) & 0xf);
10081 else
10082 insn = emit_insn
10083 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10084 GEN_INT (- args_to_push)));
10086 RTX_FRAME_RELATED_P (insn) = 1;
10088 saved_pretend_args = 1;
10089 fp_offset = args_to_push;
10090 args_to_push = 0;
10092 /* Now reuse r3 to preserve IP. */
10093 insn = gen_rtx_REG (SImode, 3);
10094 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10095 (void) emit_insn (insn);
10099 if (fp_offset)
10101 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10102 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10104 else
10105 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10107 insn = emit_insn (insn);
10108 RTX_FRAME_RELATED_P (insn) = 1;
10111 if (args_to_push)
10113 /* Push the argument registers, or reserve space for them. */
10114 if (cfun->machine->uses_anonymous_args)
10115 insn = emit_multi_reg_push
10116 ((0xf0 >> (args_to_push / 4)) & 0xf);
10117 else
10118 insn = emit_insn
10119 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10120 GEN_INT (- args_to_push)));
10121 RTX_FRAME_RELATED_P (insn) = 1;
10124 /* If this is an interrupt service routine, and the link register
10125 is going to be pushed, and we are not creating a stack frame
10126 (which would involve an extra push of IP and a pop in the epilogue),
10127 then subtracting four from LR now means that the function return
10128 can be done with a single instruction. */
10129 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10130 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10131 && ! frame_pointer_needed)
10132 emit_insn (gen_rtx_SET (SImode,
10133 gen_rtx_REG (SImode, LR_REGNUM),
10134 gen_rtx_PLUS (SImode,
10135 gen_rtx_REG (SImode, LR_REGNUM),
10136 GEN_INT (-4))));
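/* For example (an illustrative sketch, not literal compiler output):
   with LR pre-decremented as above, an IRQ handler that pushed
   {r0-r3, lr} can return with the single instruction

       ldmfd   sp!, {r0-r3, pc}^

   rather than restoring the registers and then executing a separate
   "subs pc, lr, #4".  */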
10138 if (live_regs_mask)
10140 insn = emit_multi_reg_push (live_regs_mask);
10141 saved_regs += bit_count (live_regs_mask) * 4;
10142 RTX_FRAME_RELATED_P (insn) = 1;
10145 if (TARGET_IWMMXT)
10146 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10147 if (regs_ever_live[reg] && ! call_used_regs [reg])
10149 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10150 insn = gen_rtx_MEM (V2SImode, insn);
10151 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10152 gen_rtx_REG (V2SImode, reg)));
10153 RTX_FRAME_RELATED_P (insn) = 1;
10154 saved_regs += 8;
10157 if (! IS_VOLATILE (func_type))
10159 int start_reg;
10161 /* Save any floating point call-saved registers used by this
10162 function. */
10163 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10165 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10166 if (regs_ever_live[reg] && !call_used_regs[reg])
10168 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10169 insn = gen_rtx_MEM (XFmode, insn);
10170 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10171 gen_rtx_REG (XFmode, reg)));
10172 RTX_FRAME_RELATED_P (insn) = 1;
10173 saved_regs += 12;
10176 else
10178 start_reg = LAST_FPA_REGNUM;
10180 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10182 if (regs_ever_live[reg] && !call_used_regs[reg])
10184 if (start_reg - reg == 3)
10186 insn = emit_sfm (reg, 4);
10187 RTX_FRAME_RELATED_P (insn) = 1;
10188 saved_regs += 48;
10189 start_reg = reg - 1;
10192 else
10194 if (start_reg != reg)
10196 insn = emit_sfm (reg + 1, start_reg - reg);
10197 RTX_FRAME_RELATED_P (insn) = 1;
10198 saved_regs += (start_reg - reg) * 12;
10200 start_reg = reg - 1;
10204 if (start_reg != reg)
10206 insn = emit_sfm (reg + 1, start_reg - reg);
10207 saved_regs += (start_reg - reg) * 12;
10208 RTX_FRAME_RELATED_P (insn) = 1;
10211 if (TARGET_HARD_FLOAT && TARGET_VFP)
10213 start_reg = FIRST_VFP_REGNUM;
10215 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10217 if ((!regs_ever_live[reg] || call_used_regs[reg])
10218 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10220 if (start_reg != reg)
10221 saved_regs += vfp_emit_fstmx (start_reg,
10222 (reg - start_reg) / 2);
10223 start_reg = reg + 2;
10226 if (start_reg != reg)
10227 saved_regs += vfp_emit_fstmx (start_reg,
10228 (reg - start_reg) / 2);
10232 if (frame_pointer_needed)
10234 /* Create the new frame pointer. */
10235 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10236 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10237 RTX_FRAME_RELATED_P (insn) = 1;
10239 if (IS_NESTED (func_type))
10241 /* Recover the static chain register. */
10242 if (regs_ever_live [3] == 0
10243 || saved_pretend_args)
10244 insn = gen_rtx_REG (SImode, 3);
10245 else /* if (current_function_pretend_args_size == 0) */
10247 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10248 GEN_INT (4));
10249 insn = gen_rtx_MEM (SImode, insn);
10252 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10253 /* Add a USE to stop propagate_one_insn() from barfing. */
10254 emit_insn (gen_prologue_use (ip_rtx));
10258 offsets = arm_get_frame_offsets ();
10259 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10261 /* This add can produce multiple insns for a large constant, so we
10262 need to get tricky. */
10263 rtx last = get_last_insn ();
10265 amount = GEN_INT (offsets->saved_args + saved_regs
10266 - offsets->outgoing_args);
10268 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10269 amount));
10272 last = last ? NEXT_INSN (last) : get_insns ();
10273 RTX_FRAME_RELATED_P (last) = 1;
10275 while (last != insn);
10277 /* If the frame pointer is needed, emit a special barrier that
10278 will prevent the scheduler from moving stores to the frame
10279 before the stack adjustment. */
10280 if (frame_pointer_needed)
10281 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10282 hard_frame_pointer_rtx));
10286 if (flag_pic)
10287 arm_load_pic_register (INVALID_REGNUM);
10289 /* If we are profiling, make sure no instructions are scheduled before
10290 the call to mcount. Similarly if the user has requested no
10291 scheduling in the prologue. */
10292 if (current_function_profile || TARGET_NO_SCHED_PRO)
10293 emit_insn (gen_blockage ());
10295 /* If the link register is being kept alive, with the return address in it,
10296 then make sure that it does not get reused by the ce2 pass. */
10297 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10299 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10300 cfun->machine->lr_save_eliminated = 1;
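/* In outline, a full-featured prologue built above lays the stack out
   roughly as follows (higher addresses first; each region is present
   only when needed):

       [ pretend args / saved IP ]   nested or varargs functions
       [ saved core registers    ]   live_regs_mask
       [ saved iWMMXt registers  ]
       [ saved FPA/VFP registers ]
       [ locals + outgoing args  ]   <- SP after the final adjustment

   This is only a sketch of the common case; arm_get_frame_offsets()
   is the authoritative description of the layout.  */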
10304 /* If CODE is 'd', then X is a condition operand and the instruction
10305 should only be executed if the condition is true.
10306 If CODE is 'D', then X is a condition operand and the instruction
10307 should only be executed if the condition is false: however, if the mode
10308 of the comparison is CCFPEmode, then always execute the instruction -- we
10309 do this because in these circumstances !GE does not necessarily imply LT;
10310 in these cases the instruction pattern will take care to make sure that
10311 an instruction containing %d will follow, thereby undoing the effects of
10312 doing this instruction unconditionally.
10313 If CODE is 'N' then X is a floating point operand that must be negated
10314 before output.
10315 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10316 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
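/* For example, an assembler template in the machine description such
   as (a made-up illustration, not a real arm.md pattern)

       "mvn%?\t%0, #%B1"

   would print operand 1 as the bitwise inverse of its constant value,
   while %d/%D in a conditional pattern print the condition (or its
   inverse) selected by the operand.  */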
10317 void
10318 arm_print_operand (FILE *stream, rtx x, int code)
10320 switch (code)
10322 case '@':
10323 fputs (ASM_COMMENT_START, stream);
10324 return;
10326 case '_':
10327 fputs (user_label_prefix, stream);
10328 return;
10330 case '|':
10331 fputs (REGISTER_PREFIX, stream);
10332 return;
10334 case '?':
10335 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10337 if (TARGET_THUMB)
10339 output_operand_lossage ("predicated Thumb instruction");
10340 break;
10342 if (current_insn_predicate != NULL)
10344 output_operand_lossage
10345 ("predicated instruction in conditional sequence");
10346 break;
10349 fputs (arm_condition_codes[arm_current_cc], stream);
10351 else if (current_insn_predicate)
10353 enum arm_cond_code code;
10355 if (TARGET_THUMB)
10357 output_operand_lossage ("predicated Thumb instruction");
10358 break;
10361 code = get_arm_condition_code (current_insn_predicate);
10362 fputs (arm_condition_codes[code], stream);
10364 return;
10366 case 'N':
10368 REAL_VALUE_TYPE r;
10369 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10370 r = REAL_VALUE_NEGATE (r);
10371 fprintf (stream, "%s", fp_const_from_val (&r));
10373 return;
10375 case 'B':
10376 if (GET_CODE (x) == CONST_INT)
10378 HOST_WIDE_INT val;
10379 val = ARM_SIGN_EXTEND (~INTVAL (x));
10380 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10382 else
10384 putc ('~', stream);
10385 output_addr_const (stream, x);
10387 return;
10389 case 'i':
10390 fprintf (stream, "%s", arithmetic_instr (x, 1));
10391 return;
10393 /* Truncate Cirrus shift counts. */
10394 case 's':
10395 if (GET_CODE (x) == CONST_INT)
10397 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10398 return;
10400 arm_print_operand (stream, x, 0);
10401 return;
10403 case 'I':
10404 fprintf (stream, "%s", arithmetic_instr (x, 0));
10405 return;
10407 case 'S':
10409 HOST_WIDE_INT val;
10410 const char * shift = shift_op (x, &val);
10412 if (shift)
10414 fprintf (stream, ", %s ", shift_op (x, &val));
10415 if (val == -1)
10416 arm_print_operand (stream, XEXP (x, 1), 0);
10417 else
10418 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10421 return;
10423 /* An explanation of the 'Q', 'R' and 'H' register operands:
10425 In a pair of registers containing a DI or DF value the 'Q'
10426 operand returns the register number of the register containing
10427 the least significant part of the value. The 'R' operand returns
10428 the register number of the register containing the most
10429 significant part of the value.
10431 The 'H' operand returns the higher of the two register numbers.
10432 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10433 same as the 'Q' operand, since the most significant part of the
10434 value is held in the lower number register. The reverse is true
10435 on systems where WORDS_BIG_ENDIAN is false.
10437 The purpose of these operands is to distinguish between cases
10438 where the endian-ness of the values is important (for example
10439 when they are added together), and cases where the endian-ness
10440 is irrelevant, but the order of register operations is important.
10441 For example when loading a value from memory into a register
10442 pair, the endian-ness does not matter. Provided that the value
10443 from the lower memory address is put into the lower numbered
10444 register, and the value from the higher address is put into the
10445 higher numbered register, the load will work regardless of whether
10446 the value being loaded is big-wordian or little-wordian. The
10447 order of the two register loads can matter however, if the address
10448 of the memory location is actually held in one of the registers
10449 being overwritten by the load. */
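/* For example, with WORDS_BIG_ENDIAN false a DImode value in the pair
   r0/r1 keeps its least significant word in r0, so %Q prints r0, %R
   prints r1 and %H (the higher register number) also prints r1.  With
   WORDS_BIG_ENDIAN true the words swap: %Q prints r1, %R prints r0,
   and %H still prints r1.  */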
10450 case 'Q':
10451 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10453 output_operand_lossage ("invalid operand for code '%c'", code);
10454 return;
10457 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10458 return;
10460 case 'R':
10461 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10463 output_operand_lossage ("invalid operand for code '%c'", code);
10464 return;
10467 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10468 return;
10470 case 'H':
10471 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10473 output_operand_lossage ("invalid operand for code '%c'", code);
10474 return;
10477 asm_fprintf (stream, "%r", REGNO (x) + 1);
10478 return;
10480 case 'm':
10481 asm_fprintf (stream, "%r",
10482 GET_CODE (XEXP (x, 0)) == REG
10483 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10484 return;
10486 case 'M':
10487 asm_fprintf (stream, "{%r-%r}",
10488 REGNO (x),
10489 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10490 return;
10492 case 'd':
10493 /* CONST_TRUE_RTX means always -- that's the default. */
10494 if (x == const_true_rtx)
10495 return;
10497 if (!COMPARISON_P (x))
10499 output_operand_lossage ("invalid operand for code '%c'", code);
10500 return;
10503 fputs (arm_condition_codes[get_arm_condition_code (x)],
10504 stream);
10505 return;
10507 case 'D':
10508 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10509 want to do that. */
10510 if (x == const_true_rtx)
10512 output_operand_lossage ("instruction never executed");
10513 return;
10515 if (!COMPARISON_P (x))
10517 output_operand_lossage ("invalid operand for code '%c'", code);
10518 return;
10521 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10522 (get_arm_condition_code (x))],
10523 stream);
10524 return;
10526 /* Cirrus registers can be accessed in a variety of ways:
10527 single floating point (f)
10528 double floating point (d)
10529 32bit integer (fx)
10530 64bit integer (dx). */
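/* For example, assuming the usual "mv0".."mv15" entries in reg_names,
   register mv3 prints as "mvf3" for 'W', "mvd3" for 'X', "mvfx3" for
   'Y' and "mvdx3" for 'Z'.  */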
10531 case 'W': /* Cirrus register in F mode. */
10532 case 'X': /* Cirrus register in D mode. */
10533 case 'Y': /* Cirrus register in FX mode. */
10534 case 'Z': /* Cirrus register in DX mode. */
10535 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10536 abort ();
10538 fprintf (stream, "mv%s%s",
10539 code == 'W' ? "f"
10540 : code == 'X' ? "d"
10541 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10543 return;
10545 /* Print a Cirrus register, choosing the mode suffix from the register's own mode. */
10546 case 'V':
10548 int mode = GET_MODE (x);
10550 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10552 output_operand_lossage ("invalid operand for code '%c'", code);
10553 return;
10556 fprintf (stream, "mv%s%s",
10557 mode == DFmode ? "d"
10558 : mode == SImode ? "fx"
10559 : mode == DImode ? "dx"
10560 : "f", reg_names[REGNO (x)] + 2);
10562 return;
10565 case 'U':
10566 if (GET_CODE (x) != REG
10567 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10568 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10569 /* Bad value for wCG register number. */
10571 output_operand_lossage ("invalid operand for code '%c'", code);
10572 return;
10575 else
10576 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10577 return;
10579 /* Print an iWMMXt control register name. */
10580 case 'w':
10581 if (GET_CODE (x) != CONST_INT
10582 || INTVAL (x) < 0
10583 || INTVAL (x) >= 16)
10584 /* Bad value for wC register number. */
10586 output_operand_lossage ("invalid operand for code '%c'", code);
10587 return;
10590 else
10592 static const char * wc_reg_names [16] =
10594 "wCID", "wCon", "wCSSF", "wCASF",
10595 "wC4", "wC5", "wC6", "wC7",
10596 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10597 "wC12", "wC13", "wC14", "wC15"
10600 fputs (wc_reg_names [INTVAL (x)], stream);
10602 return;
10604 /* Print a VFP double precision register name. */
10605 case 'P':
10607 int mode = GET_MODE (x);
10608 int num;
10610 if (mode != DImode && mode != DFmode)
10612 output_operand_lossage ("invalid operand for code '%c'", code);
10613 return;
10616 if (GET_CODE (x) != REG
10617 || !IS_VFP_REGNUM (REGNO (x)))
10619 output_operand_lossage ("invalid operand for code '%c'", code);
10620 return;
10623 num = REGNO(x) - FIRST_VFP_REGNUM;
10624 if (num & 1)
10626 output_operand_lossage ("invalid operand for code '%c'", code);
10627 return;
10630 fprintf (stream, "d%d", num >> 1);
10632 return;
10634 default:
10635 if (x == 0)
10637 output_operand_lossage ("missing operand");
10638 return;
10641 if (GET_CODE (x) == REG)
10642 asm_fprintf (stream, "%r", REGNO (x));
10643 else if (GET_CODE (x) == MEM)
10645 output_memory_reference_mode = GET_MODE (x);
10646 output_address (XEXP (x, 0));
10648 else if (GET_CODE (x) == CONST_DOUBLE)
10649 fprintf (stream, "#%s", fp_immediate_constant (x));
10650 else if (GET_CODE (x) == NEG)
10651 abort (); /* This should never happen now. */
10652 else
10654 fputc ('#', stream);
10655 output_addr_const (stream, x);
10660 #ifndef AOF_ASSEMBLER
10661 /* Target hook for assembling integer objects. The ARM version needs to
10662 handle word-sized values specially. */
10663 static bool
10664 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10666 if (size == UNITS_PER_WORD && aligned_p)
10668 fputs ("\t.word\t", asm_out_file);
10669 output_addr_const (asm_out_file, x);
10671 /* Mark symbols as position independent. We only do this in the
10672 .text segment, not in the .data segment. */
10673 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10674 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10676 if (GET_CODE (x) == SYMBOL_REF
10677 && (CONSTANT_POOL_ADDRESS_P (x)
10678 || SYMBOL_REF_LOCAL_P (x)))
10679 fputs ("(GOTOFF)", asm_out_file);
10680 else if (GET_CODE (x) == LABEL_REF)
10681 fputs ("(GOTOFF)", asm_out_file);
10682 else
10683 fputs ("(GOT)", asm_out_file);
10685 fputc ('\n', asm_out_file);
10686 return true;
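/* For example, for PIC constant-table entries this path emits
   directives along the lines of (illustrative symbol names):

       .word   local_sym(GOTOFF)
       .word   global_sym(GOT)
*/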
10689 if (arm_vector_mode_supported_p (GET_MODE (x)))
10691 int i, units;
10693 if (GET_CODE (x) != CONST_VECTOR)
10694 abort ();
10696 units = CONST_VECTOR_NUNITS (x);
10698 switch (GET_MODE (x))
10700 case V2SImode: size = 4; break;
10701 case V4HImode: size = 2; break;
10702 case V8QImode: size = 1; break;
10703 default:
10704 abort ();
10707 for (i = 0; i < units; i++)
10709 rtx elt;
10711 elt = CONST_VECTOR_ELT (x, i);
10712 assemble_integer
10713 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10716 return true;
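/* For example, a V4HImode constant vector {1, 2, 3, 4} comes out as
   four 2-byte integers, the first at BIGGEST_ALIGNMENT; with the GNU
   assembler that is roughly:

       .short  1, 2, 3, 4

   (Illustrative; assemble_integer picks the actual directives.)  */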
10719 return default_assemble_integer (x, size, aligned_p);
10721 #endif
10723 /* A finite state machine takes care of noticing whether or not instructions
10724 can be conditionally executed, thus decreasing execution time and code
10725 size by deleting branch instructions. The fsm is controlled by
10726 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10728 /* The states of the fsm controlling condition codes are:
10729 0: normal, do nothing special
10730 1: make ASM_OUTPUT_OPCODE not output this instruction
10731 2: make ASM_OUTPUT_OPCODE not output this instruction
10732 3: make instructions conditional
10733 4: make instructions conditional
10735 State transitions (state->state by whom under condition):
10736 0 -> 1 final_prescan_insn if the `target' is a label
10737 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10738 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10739 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10740 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10741 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10742 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10743 (the target insn is arm_target_insn).
10745 If the jump clobbers the conditions then we use states 2 and 4.
10747 A similar thing can be done with conditional return insns.
10749 XXX In case the `target' is an unconditional branch, this conditionalising
10750 of the instructions always reduces code size, but not always execution
10751 time. But then, I want to reduce the code size to somewhere near what
10752 /bin/cc produces. */
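/* For example, the fsm conditionalizes a short forward branch such as
   (illustrative assembly):

       cmp     r0, #0
       beq     .L1
       add     r1, r1, #1
   .L1:

   into

       cmp     r0, #0
       addne   r1, r1, #1

   eliminating the branch entirely.  */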
10754 /* Returns the index of the ARM condition code string in
10755 `arm_condition_codes'. COMPARISON should be an rtx like
10756 `(eq (...) (...))'. */
10757 static enum arm_cond_code
10758 get_arm_condition_code (rtx comparison)
10760 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10761 int code;
10762 enum rtx_code comp_code = GET_CODE (comparison);
10764 if (GET_MODE_CLASS (mode) != MODE_CC)
10765 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10766 XEXP (comparison, 1));
10768 switch (mode)
10770 case CC_DNEmode: code = ARM_NE; goto dominance;
10771 case CC_DEQmode: code = ARM_EQ; goto dominance;
10772 case CC_DGEmode: code = ARM_GE; goto dominance;
10773 case CC_DGTmode: code = ARM_GT; goto dominance;
10774 case CC_DLEmode: code = ARM_LE; goto dominance;
10775 case CC_DLTmode: code = ARM_LT; goto dominance;
10776 case CC_DGEUmode: code = ARM_CS; goto dominance;
10777 case CC_DGTUmode: code = ARM_HI; goto dominance;
10778 case CC_DLEUmode: code = ARM_LS; goto dominance;
10779 case CC_DLTUmode: code = ARM_CC;
10781 dominance:
10782 if (comp_code != EQ && comp_code != NE)
10783 abort ();
10785 if (comp_code == EQ)
10786 return ARM_INVERSE_CONDITION_CODE (code);
10787 return code;
10789 case CC_NOOVmode:
10790 switch (comp_code)
10792 case NE: return ARM_NE;
10793 case EQ: return ARM_EQ;
10794 case GE: return ARM_PL;
10795 case LT: return ARM_MI;
10796 default: abort ();
10799 case CC_Zmode:
10800 switch (comp_code)
10802 case NE: return ARM_NE;
10803 case EQ: return ARM_EQ;
10804 default: abort ();
10807 case CC_Nmode:
10808 switch (comp_code)
10810 case NE: return ARM_MI;
10811 case EQ: return ARM_PL;
10812 default: abort ();
10815 case CCFPEmode:
10816 case CCFPmode:
10817 /* These encodings assume that AC=1 in the FPA system control
10818 byte. This allows us to handle all cases except UNEQ and
10819 LTGT. */
10820 switch (comp_code)
10822 case GE: return ARM_GE;
10823 case GT: return ARM_GT;
10824 case LE: return ARM_LS;
10825 case LT: return ARM_MI;
10826 case NE: return ARM_NE;
10827 case EQ: return ARM_EQ;
10828 case ORDERED: return ARM_VC;
10829 case UNORDERED: return ARM_VS;
10830 case UNLT: return ARM_LT;
10831 case UNLE: return ARM_LE;
10832 case UNGT: return ARM_HI;
10833 case UNGE: return ARM_PL;
10834 /* UNEQ and LTGT do not have a representation. */
10835 case UNEQ: /* Fall through. */
10836 case LTGT: /* Fall through. */
10837 default: abort ();
10840 case CC_SWPmode:
10841 switch (comp_code)
10843 case NE: return ARM_NE;
10844 case EQ: return ARM_EQ;
10845 case GE: return ARM_LE;
10846 case GT: return ARM_LT;
10847 case LE: return ARM_GE;
10848 case LT: return ARM_GT;
10849 case GEU: return ARM_LS;
10850 case GTU: return ARM_CC;
10851 case LEU: return ARM_CS;
10852 case LTU: return ARM_HI;
10853 default: abort ();
10856 case CC_Cmode:
10857 switch (comp_code)
10859 case LTU: return ARM_CS;
10860 case GEU: return ARM_CC;
10861 default: abort ();
10864 case CCmode:
10865 switch (comp_code)
10867 case NE: return ARM_NE;
10868 case EQ: return ARM_EQ;
10869 case GE: return ARM_GE;
10870 case GT: return ARM_GT;
10871 case LE: return ARM_LE;
10872 case LT: return ARM_LT;
10873 case GEU: return ARM_CS;
10874 case GTU: return ARM_HI;
10875 case LEU: return ARM_LS;
10876 case LTU: return ARM_CC;
10877 default: abort ();
10880 default: abort ();
10883 abort ();
10886 void
10887 arm_final_prescan_insn (rtx insn)
10889 /* BODY will hold the body of INSN. */
10890 rtx body = PATTERN (insn);
10892 /* This will be 1 if trying to repeat the trick, and things need to be
10893 reversed if it appears to fail. */
10894 int reverse = 0;
10896 /* JUMP_CLOBBERS being one implies that the condition codes are
10897 clobbered if the branch is taken, even if the rtl suggests otherwise.
10898 It also means that we have to grub around within the jump expression
10899 to find out what the conditions are when the jump isn't taken. */
10900 int jump_clobbers = 0;
10902 /* If we start with a return insn, we only succeed if we find another one. */
10903 int seeking_return = 0;
10905 /* START_INSN will hold the insn from where we start looking. This is the
10906 first insn after the following code_label if REVERSE is true. */
10907 rtx start_insn = insn;
10909 /* If in state 4, check if the target branch is reached, in order to
10910 change back to state 0. */
10911 if (arm_ccfsm_state == 4)
10913 if (insn == arm_target_insn)
10915 arm_target_insn = NULL;
10916 arm_ccfsm_state = 0;
10918 return;
10921 /* If in state 3, it is possible to repeat the trick, if this insn is an
10922 unconditional branch to a label, and immediately following this branch
10923 is the previous target label which is only used once, and the label this
10924 branch jumps to is not too far off. */
10925 if (arm_ccfsm_state == 3)
10927 if (simplejump_p (insn))
10929 start_insn = next_nonnote_insn (start_insn);
10930 if (GET_CODE (start_insn) == BARRIER)
10932 /* XXX Isn't this always a barrier? */
10933 start_insn = next_nonnote_insn (start_insn);
10935 if (GET_CODE (start_insn) == CODE_LABEL
10936 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10937 && LABEL_NUSES (start_insn) == 1)
10938 reverse = TRUE;
10939 else
10940 return;
10942 else if (GET_CODE (body) == RETURN)
10944 start_insn = next_nonnote_insn (start_insn);
10945 if (GET_CODE (start_insn) == BARRIER)
10946 start_insn = next_nonnote_insn (start_insn);
10947 if (GET_CODE (start_insn) == CODE_LABEL
10948 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10949 && LABEL_NUSES (start_insn) == 1)
10951 reverse = TRUE;
10952 seeking_return = 1;
10954 else
10955 return;
10957 else
10958 return;
10961 if (arm_ccfsm_state != 0 && !reverse)
10962 abort ();
10963 if (GET_CODE (insn) != JUMP_INSN)
10964 return;
10966 /* This jump might be paralleled with a clobber of the condition codes;
10967 the jump should always come first. */
10968 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10969 body = XVECEXP (body, 0, 0);
10971 if (reverse
10972 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10973 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10975 int insns_skipped;
10976 int fail = FALSE, succeed = FALSE;
10977 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10978 int then_not_else = TRUE;
10979 rtx this_insn = start_insn, label = 0;
10981 /* If the jump cannot be done with one instruction, we cannot
10982 conditionally execute the instruction in the inverse case. */
10983 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10985 jump_clobbers = 1;
10986 return;
10989 /* Register the insn jumped to. */
10990 if (reverse)
10992 if (!seeking_return)
10993 label = XEXP (SET_SRC (body), 0);
10995 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10996 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10997 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10999 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11000 then_not_else = FALSE;
11002 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11003 seeking_return = 1;
11004 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11006 seeking_return = 1;
11007 then_not_else = FALSE;
11009 else
11010 abort ();
11012 /* See how many insns this branch skips, and what kind of insns. If all
11013 insns are okay, and the label or unconditional branch to the same
11014 label is not too far away, succeed. */
11015 for (insns_skipped = 0;
11016 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11018 rtx scanbody;
11020 this_insn = next_nonnote_insn (this_insn);
11021 if (!this_insn)
11022 break;
11024 switch (GET_CODE (this_insn))
11026 case CODE_LABEL:
11027 /* Succeed if it is the target label, otherwise fail since
11028 control falls in from somewhere else. */
11029 if (this_insn == label)
11031 if (jump_clobbers)
11033 arm_ccfsm_state = 2;
11034 this_insn = next_nonnote_insn (this_insn);
11036 else
11037 arm_ccfsm_state = 1;
11038 succeed = TRUE;
11040 else
11041 fail = TRUE;
11042 break;
11044 case BARRIER:
11045 /* Succeed if the following insn is the target label.
11046 Otherwise fail.
11047 If return insns are used then the last insn in a function
11048 will be a barrier. */
11049 this_insn = next_nonnote_insn (this_insn);
11050 if (this_insn && this_insn == label)
11052 if (jump_clobbers)
11054 arm_ccfsm_state = 2;
11055 this_insn = next_nonnote_insn (this_insn);
11057 else
11058 arm_ccfsm_state = 1;
11059 succeed = TRUE;
11061 else
11062 fail = TRUE;
11063 break;
11065 case CALL_INSN:
11066 /* The AAPCS says that conditional calls should not be
11067 used since they make interworking inefficient (the
11068 linker can't transform BL<cond> into BLX). That's
11069 only a problem if the machine has BLX. */
11070 if (arm_arch5)
11072 fail = TRUE;
11073 break;
11076 /* Succeed if the following insn is the target label, or
11077 if the following two insns are a barrier and the
11078 target label. */
11079 this_insn = next_nonnote_insn (this_insn);
11080 if (this_insn && GET_CODE (this_insn) == BARRIER)
11081 this_insn = next_nonnote_insn (this_insn);
11083 if (this_insn && this_insn == label
11084 && insns_skipped < max_insns_skipped)
11086 if (jump_clobbers)
11088 arm_ccfsm_state = 2;
11089 this_insn = next_nonnote_insn (this_insn);
11091 else
11092 arm_ccfsm_state = 1;
11093 succeed = TRUE;
11095 else
11096 fail = TRUE;
11097 break;
11099 case JUMP_INSN:
11100 /* If this is an unconditional branch to the same label, succeed.
11101 If it is to another label, do nothing. If it is conditional,
11102 fail. */
11103 /* XXX Probably, the tests for SET and the PC are
11104 unnecessary. */
11106 scanbody = PATTERN (this_insn);
11107 if (GET_CODE (scanbody) == SET
11108 && GET_CODE (SET_DEST (scanbody)) == PC)
11110 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11111 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11113 arm_ccfsm_state = 2;
11114 succeed = TRUE;
11116 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11117 fail = TRUE;
11119 /* Fail if a conditional return is undesirable (e.g. on a
11120 StrongARM), but still allow this if optimizing for size. */
11121 else if (GET_CODE (scanbody) == RETURN
11122 && !use_return_insn (TRUE, NULL)
11123 && !optimize_size)
11124 fail = TRUE;
11125 else if (GET_CODE (scanbody) == RETURN
11126 && seeking_return)
11128 arm_ccfsm_state = 2;
11129 succeed = TRUE;
11131 else if (GET_CODE (scanbody) == PARALLEL)
11133 switch (get_attr_conds (this_insn))
11135 case CONDS_NOCOND:
11136 break;
11137 default:
11138 fail = TRUE;
11139 break;
11142 else
11143 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11145 break;
11147 case INSN:
11148 /* Instructions using or affecting the condition codes make it
11149 fail. */
11150 scanbody = PATTERN (this_insn);
11151 if (!(GET_CODE (scanbody) == SET
11152 || GET_CODE (scanbody) == PARALLEL)
11153 || get_attr_conds (this_insn) != CONDS_NOCOND)
11154 fail = TRUE;
11156 /* A conditional Cirrus instruction must be followed by
11157 a non-Cirrus instruction. However, since this function
11158 conditionalizes instructions, and since by the time we
11159 get here we can no longer add instructions (nops)
11160 because shorten_branches() has already been
11161 called, we disable conditionalizing Cirrus
11162 instructions altogether, to be safe. */
11163 if (GET_CODE (scanbody) != USE
11164 && GET_CODE (scanbody) != CLOBBER
11165 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11166 fail = TRUE;
11167 break;
11169 default:
11170 break;
11173 if (succeed)
11175 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11176 arm_target_label = CODE_LABEL_NUMBER (label);
11177 else if (seeking_return || arm_ccfsm_state == 2)
11179 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11181 this_insn = next_nonnote_insn (this_insn);
11182 if (this_insn && (GET_CODE (this_insn) == BARRIER
11183 || GET_CODE (this_insn) == CODE_LABEL))
11184 abort ();
11186 if (!this_insn)
11188 /* Oh dear! We ran off the end... give up. */
11189 recog (PATTERN (insn), insn, NULL);
11190 arm_ccfsm_state = 0;
11191 arm_target_insn = NULL;
11192 return;
11194 arm_target_insn = this_insn;
11196 else
11197 abort ();
11198 if (jump_clobbers)
11200 if (reverse)
11201 abort ();
11202 arm_current_cc =
11203 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11204 0), 0), 1));
11205 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11206 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11207 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11208 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11210 else
11212 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11213 what it was. */
11214 if (!reverse)
11215 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11216 0));
11219 if (reverse || then_not_else)
11220 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11223 /* Restore recog_data (getting the attributes of other insns can
11224 destroy this array, but final.c assumes that it remains intact
11225 across this call; since the insn has been recognized already we
11226 call recog direct). */
11227 recog (PATTERN (insn), insn, NULL);
11231 /* Returns true if REGNO is a valid register
11232 for holding a quantity of mode MODE. */
11234 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11236 if (GET_MODE_CLASS (mode) == MODE_CC)
11237 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11239 if (TARGET_THUMB)
11240 /* For the Thumb we only allow values bigger than SImode in
11241 registers 0 - 6, so that there is always a second low
11242 register available to hold the upper part of the value.
11243 We probably ought to ensure that the register is the
11244 start of an even numbered register pair. */
11245 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11247 if (IS_CIRRUS_REGNUM (regno))
11248 /* We have outlawed SI values in Cirrus registers because they
11249 reside in the lower 32 bits, but SF values reside in the
11250 upper 32 bits. This causes gcc all sorts of grief. We can't
11251 even split the registers into pairs because Cirrus SI values
11252 get sign-extended to 64 bits -- aldyh. */
11253 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11255 if (IS_VFP_REGNUM (regno))
11257 if (mode == SFmode || mode == SImode)
11258 return TRUE;
11260 /* DFmode values are only valid in even register pairs. */
11261 if (mode == DFmode)
11262 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11263 return FALSE;
11266 if (IS_IWMMXT_GR_REGNUM (regno))
11267 return mode == SImode;
11269 if (IS_IWMMXT_REGNUM (regno))
11270 return VALID_IWMMXT_REG_MODE (mode);
11272 /* We allow any value to be stored in the general registers.
11273 Restrict doubleword quantities to even register pairs so that we can
11274 use ldrd. */
11275 if (regno <= LAST_ARM_REGNUM)
11276 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
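/* For example, when TARGET_LDRD holds, a DImode value may live in
   r0/r1 (even-numbered base register) but not in r1/r2, so that a
   single ldrd/strd can move it.  (Illustrative example.)  */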
11278 if ( regno == FRAME_POINTER_REGNUM
11279 || regno == ARG_POINTER_REGNUM)
11280 /* We only allow integers in the fake hard registers. */
11281 return GET_MODE_CLASS (mode) == MODE_INT;
11283 /* The only registers left are the FPA registers
11284 which we only allow to hold FP values. */
11285 return GET_MODE_CLASS (mode) == MODE_FLOAT
11286 && regno >= FIRST_FPA_REGNUM
11287 && regno <= LAST_FPA_REGNUM;
11291 arm_regno_class (int regno)
11293 if (TARGET_THUMB)
11295 if (regno == STACK_POINTER_REGNUM)
11296 return STACK_REG;
11297 if (regno == CC_REGNUM)
11298 return CC_REG;
11299 if (regno < 8)
11300 return LO_REGS;
11301 return HI_REGS;
11304 if ( regno <= LAST_ARM_REGNUM
11305 || regno == FRAME_POINTER_REGNUM
11306 || regno == ARG_POINTER_REGNUM)
11307 return GENERAL_REGS;
11309 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11310 return NO_REGS;
11312 if (IS_CIRRUS_REGNUM (regno))
11313 return CIRRUS_REGS;
11315 if (IS_VFP_REGNUM (regno))
11316 return VFP_REGS;
11318 if (IS_IWMMXT_REGNUM (regno))
11319 return IWMMXT_REGS;
11321 if (IS_IWMMXT_GR_REGNUM (regno))
11322 return IWMMXT_GR_REGS;
11324 return FPA_REGS;
11327 /* Handle a special case when computing the offset
11328 of an argument from the frame pointer. */
11330 arm_debugger_arg_offset (int value, rtx addr)
11332 rtx insn;
11334 /* We are only interested if dbxout_parms() failed to compute the offset. */
11335 if (value != 0)
11336 return 0;
11338 /* We can only cope with the case where the address is held in a register. */
11339 if (GET_CODE (addr) != REG)
11340 return 0;
11342 /* If we are using the frame pointer to point at the argument, then
11343 an offset of 0 is correct. */
11344 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11345 return 0;
11347 /* If we are using the stack pointer to point at the
11348 argument, then an offset of 0 is correct. */
11349 if ((TARGET_THUMB || !frame_pointer_needed)
11350 && REGNO (addr) == SP_REGNUM)
11351 return 0;
11353 /* Oh dear. The argument is pointed to by a register rather
11354 than being held in a register, or being stored at a known
11355 offset from the frame pointer. Since GDB only understands
11356 those two kinds of argument we must translate the address
11357 held in the register into an offset from the frame pointer.
11358 We do this by searching through the insns for the function
11359 looking to see where this register gets its value. If the
11360 register is initialized from the frame pointer plus an offset
11361 then we are in luck and we can continue, otherwise we give up.
11363 This code is exercised by producing debugging information
11364 for a function with arguments like this:
11366 double func (double a, double b, int c, double d) {return d;}
11368 Without this code the stab for parameter 'd' will be set to
11369 an offset of 0 from the frame pointer, rather than 8. */
11371 /* The if() statement says:
11373 If the insn is a normal instruction
11374 and if the insn is setting the value in a register
11375 and if the register being set is the register holding the address of the argument
11376 and if the address is computed by an addition
11377 that involves adding to a register
11378 which is the frame pointer
11379 a constant integer
11381 then... */
11383 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11385 if ( GET_CODE (insn) == INSN
11386 && GET_CODE (PATTERN (insn)) == SET
11387 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11388 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11389 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11390 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11391 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11394 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11396 break;
11400 if (value == 0)
11402 debug_rtx (addr);
11403 warning (0, "unable to compute real location of stacked parameter");
11404 value = 8; /* XXX magic hack */
11407 return value;
11410 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11411 do \
11413 if ((MASK) & insn_flags) \
11414 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11415 BUILT_IN_MD, NULL, NULL_TREE); \
11417 while (0)
11419 struct builtin_description
11421 const unsigned int mask;
11422 const enum insn_code icode;
11423 const char * const name;
11424 const enum arm_builtins code;
11425 const enum rtx_code comparison;
11426 const unsigned int flag;
11429 static const struct builtin_description bdesc_2arg[] =
11431 #define IWMMXT_BUILTIN(code, string, builtin) \
11432 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11433 ARM_BUILTIN_##builtin, 0, 0 },
11435 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11436 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11437 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11438 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11439 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11440 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11441 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11442 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11443 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11444 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11445 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11446 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11447 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11448 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11449 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11450 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11451 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11452 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11453 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11454 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11455 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11456 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11457 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11458 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11459 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11460 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11461 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11462 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11463 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11464 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11465 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11466 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11467 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11468 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11469 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11470 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11471 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11472 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11473 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11474 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11475 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11476 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11477 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11478 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11479 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11480 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11481 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11482 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11483 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11484 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11485 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11486 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11487 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11488 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11489 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11490 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11491 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11492 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11494 #define IWMMXT_BUILTIN2(code, builtin) \
11495 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11497 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11498 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11499 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11500 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11501 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11502 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11503 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11504 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11505 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11506 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11507 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11508 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11509 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11510 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11511 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11512 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11513 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11514 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11515 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11516 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11517 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11518 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11519 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11520 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11521 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11522 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11523 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11524 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11525 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11526 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11527 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11528 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11531 static const struct builtin_description bdesc_1arg[] =
11533 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11534 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11535 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11536 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11537 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11538 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11539 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11540 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11541 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11542 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11543 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11544 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11545 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11546 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11547 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11548 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11549 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11550 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11553 /* Set up all the iWMMXt builtins. This is
11554 not called if TARGET_IWMMXT is zero. */
11556 static void
11557 arm_init_iwmmxt_builtins (void)
11559 const struct builtin_description * d;
11560 size_t i;
11561 tree endlink = void_list_node;
11563 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11564 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11565 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11567 tree int_ftype_int
11568 = build_function_type (integer_type_node,
11569 tree_cons (NULL_TREE, integer_type_node, endlink));
11570 tree v8qi_ftype_v8qi_v8qi_int
11571 = build_function_type (V8QI_type_node,
11572 tree_cons (NULL_TREE, V8QI_type_node,
11573 tree_cons (NULL_TREE, V8QI_type_node,
11574 tree_cons (NULL_TREE,
11575 integer_type_node,
11576 endlink))));
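/* Written as a C prototype, the type built just above corresponds to
   (illustrative notation for the vector types):

       v8qi f (v8qi, v8qi, int);

   Each build_function_type call below follows the same scheme: the
   argument types are chained with tree_cons and the list is closed
   with void_list_node ("endlink").  */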
11577 tree v4hi_ftype_v4hi_int
11578 = build_function_type (V4HI_type_node,
11579 tree_cons (NULL_TREE, V4HI_type_node,
11580 tree_cons (NULL_TREE, integer_type_node,
11581 endlink)));
11582 tree v2si_ftype_v2si_int
11583 = build_function_type (V2SI_type_node,
11584 tree_cons (NULL_TREE, V2SI_type_node,
11585 tree_cons (NULL_TREE, integer_type_node,
11586 endlink)));
11587 tree v2si_ftype_di_di
11588 = build_function_type (V2SI_type_node,
11589 tree_cons (NULL_TREE, long_long_integer_type_node,
11590 tree_cons (NULL_TREE, long_long_integer_type_node,
11591 endlink)));
11592 tree di_ftype_di_int
11593 = build_function_type (long_long_integer_type_node,
11594 tree_cons (NULL_TREE, long_long_integer_type_node,
11595 tree_cons (NULL_TREE, integer_type_node,
11596 endlink)));
11597 tree di_ftype_di_int_int
11598 = build_function_type (long_long_integer_type_node,
11599 tree_cons (NULL_TREE, long_long_integer_type_node,
11600 tree_cons (NULL_TREE, integer_type_node,
11601 tree_cons (NULL_TREE,
11602 integer_type_node,
11603 endlink))));
11604 tree int_ftype_v8qi
11605 = build_function_type (integer_type_node,
11606 tree_cons (NULL_TREE, V8QI_type_node,
11607 endlink));
11608 tree int_ftype_v4hi
11609 = build_function_type (integer_type_node,
11610 tree_cons (NULL_TREE, V4HI_type_node,
11611 endlink));
11612 tree int_ftype_v2si
11613 = build_function_type (integer_type_node,
11614 tree_cons (NULL_TREE, V2SI_type_node,
11615 endlink));
11616 tree int_ftype_v8qi_int
11617 = build_function_type (integer_type_node,
11618 tree_cons (NULL_TREE, V8QI_type_node,
11619 tree_cons (NULL_TREE, integer_type_node,
11620 endlink)));
11621 tree int_ftype_v4hi_int
11622 = build_function_type (integer_type_node,
11623 tree_cons (NULL_TREE, V4HI_type_node,
11624 tree_cons (NULL_TREE, integer_type_node,
11625 endlink)));
11626 tree int_ftype_v2si_int
11627 = build_function_type (integer_type_node,
11628 tree_cons (NULL_TREE, V2SI_type_node,
11629 tree_cons (NULL_TREE, integer_type_node,
11630 endlink)));
11631 tree v8qi_ftype_v8qi_int_int
11632 = build_function_type (V8QI_type_node,
11633 tree_cons (NULL_TREE, V8QI_type_node,
11634 tree_cons (NULL_TREE, integer_type_node,
11635 tree_cons (NULL_TREE,
11636 integer_type_node,
11637 endlink))));
11638 tree v4hi_ftype_v4hi_int_int
11639 = build_function_type (V4HI_type_node,
11640 tree_cons (NULL_TREE, V4HI_type_node,
11641 tree_cons (NULL_TREE, integer_type_node,
11642 tree_cons (NULL_TREE,
11643 integer_type_node,
11644 endlink))));
11645 tree v2si_ftype_v2si_int_int
11646 = build_function_type (V2SI_type_node,
11647 tree_cons (NULL_TREE, V2SI_type_node,
11648 tree_cons (NULL_TREE, integer_type_node,
11649 tree_cons (NULL_TREE,
11650 integer_type_node,
11651 endlink))));
11652 /* Miscellaneous. */
11653 tree v8qi_ftype_v4hi_v4hi
11654 = build_function_type (V8QI_type_node,
11655 tree_cons (NULL_TREE, V4HI_type_node,
11656 tree_cons (NULL_TREE, V4HI_type_node,
11657 endlink)));
11658 tree v4hi_ftype_v2si_v2si
11659 = build_function_type (V4HI_type_node,
11660 tree_cons (NULL_TREE, V2SI_type_node,
11661 tree_cons (NULL_TREE, V2SI_type_node,
11662 endlink)));
11663 tree v2si_ftype_v4hi_v4hi
11664 = build_function_type (V2SI_type_node,
11665 tree_cons (NULL_TREE, V4HI_type_node,
11666 tree_cons (NULL_TREE, V4HI_type_node,
11667 endlink)));
11668 tree v2si_ftype_v8qi_v8qi
11669 = build_function_type (V2SI_type_node,
11670 tree_cons (NULL_TREE, V8QI_type_node,
11671 tree_cons (NULL_TREE, V8QI_type_node,
11672 endlink)));
11673 tree v4hi_ftype_v4hi_di
11674 = build_function_type (V4HI_type_node,
11675 tree_cons (NULL_TREE, V4HI_type_node,
11676 tree_cons (NULL_TREE,
11677 long_long_integer_type_node,
11678 endlink)));
11679 tree v2si_ftype_v2si_di
11680 = build_function_type (V2SI_type_node,
11681 tree_cons (NULL_TREE, V2SI_type_node,
11682 tree_cons (NULL_TREE,
11683 long_long_integer_type_node,
11684 endlink)));
11685 tree void_ftype_int_int
11686 = build_function_type (void_type_node,
11687 tree_cons (NULL_TREE, integer_type_node,
11688 tree_cons (NULL_TREE, integer_type_node,
11689 endlink)));
11690 tree di_ftype_void
11691 = build_function_type (long_long_unsigned_type_node, endlink);
11692 tree di_ftype_v8qi
11693 = build_function_type (long_long_integer_type_node,
11694 tree_cons (NULL_TREE, V8QI_type_node,
11695 endlink));
11696 tree di_ftype_v4hi
11697 = build_function_type (long_long_integer_type_node,
11698 tree_cons (NULL_TREE, V4HI_type_node,
11699 endlink));
11700 tree di_ftype_v2si
11701 = build_function_type (long_long_integer_type_node,
11702 tree_cons (NULL_TREE, V2SI_type_node,
11703 endlink));
11704 tree v2si_ftype_v4hi
11705 = build_function_type (V2SI_type_node,
11706 tree_cons (NULL_TREE, V4HI_type_node,
11707 endlink));
11708 tree v4hi_ftype_v8qi
11709 = build_function_type (V4HI_type_node,
11710 tree_cons (NULL_TREE, V8QI_type_node,
11711 endlink));
11713 tree di_ftype_di_v4hi_v4hi
11714 = build_function_type (long_long_unsigned_type_node,
11715 tree_cons (NULL_TREE,
11716 long_long_unsigned_type_node,
11717 tree_cons (NULL_TREE, V4HI_type_node,
11718 tree_cons (NULL_TREE,
11719 V4HI_type_node,
11720 endlink))));
11722 tree di_ftype_v4hi_v4hi
11723 = build_function_type (long_long_unsigned_type_node,
11724 tree_cons (NULL_TREE, V4HI_type_node,
11725 tree_cons (NULL_TREE, V4HI_type_node,
11726 endlink)));
11728 /* Normal vector binops. */
11729 tree v8qi_ftype_v8qi_v8qi
11730 = build_function_type (V8QI_type_node,
11731 tree_cons (NULL_TREE, V8QI_type_node,
11732 tree_cons (NULL_TREE, V8QI_type_node,
11733 endlink)));
11734 tree v4hi_ftype_v4hi_v4hi
11735 = build_function_type (V4HI_type_node,
11736 tree_cons (NULL_TREE, V4HI_type_node,
11737 tree_cons (NULL_TREE, V4HI_type_node,
11738 endlink)));
11739 tree v2si_ftype_v2si_v2si
11740 = build_function_type (V2SI_type_node,
11741 tree_cons (NULL_TREE, V2SI_type_node,
11742 tree_cons (NULL_TREE, V2SI_type_node,
11743 endlink)));
11744 tree di_ftype_di_di
11745 = build_function_type (long_long_unsigned_type_node,
11746 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11747 tree_cons (NULL_TREE,
11748 long_long_unsigned_type_node,
11749 endlink)));
11751 /* Add all builtins that are more or less simple operations on two
11752 operands. */
11753 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11755 /* Use one of the operands; the target can have a different mode for
11756 mask-generating compares. */
11757 enum machine_mode mode;
11758 tree type;
11760 if (d->name == 0)
11761 continue;
11763 mode = insn_data[d->icode].operand[1].mode;
11765 switch (mode)
11767 case V8QImode:
11768 type = v8qi_ftype_v8qi_v8qi;
11769 break;
11770 case V4HImode:
11771 type = v4hi_ftype_v4hi_v4hi;
11772 break;
11773 case V2SImode:
11774 type = v2si_ftype_v2si_v2si;
11775 break;
11776 case DImode:
11777 type = di_ftype_di_di;
11778 break;
11780 default:
11781 abort ();
11784 def_mbuiltin (d->mask, d->name, type, d->code);
11787 /* Add the remaining MMX insns with somewhat more complicated types. */
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11789 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11792 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);

  def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
}
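
/* Illustrative usage sketch (added for exposition; not part of the
   original source).  On an iWMMXt-enabled target each def_mbuiltin
   entry above exposes a builtin to user code; the vector typedef
   below is an assumption made only for this example.  */
#if 0 /* Example only.  */
typedef int __ex_v2si __attribute__ ((vector_size (8)));

__ex_v2si
shift_both_lanes (__ex_v2si x)
{
  /* v2si_ftype_v2si_int: shift each 32-bit lane left by 3.  */
  return __builtin_arm_wsllwi (x, 3);
}
#endif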

static void
arm_init_builtins (void)
{
  if (TARGET_REALLY_IWMMXT)
    arm_init_iwmmxt_builtins ();
}

/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x != const0_rtx)
    return x;
  x = gen_reg_rtx (mode);

  emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
			       : gen_rtx_SUBREG (DImode, x, 0)));
  return x;
}

/* Subroutine of arm_expand_builtin to take care of binop insns.  */

static rtx
arm_expand_binop_builtin (enum insn_code icode,
			  tree arglist, rtx target)
{
  rtx pat;
  tree arg0 = TREE_VALUE (arglist);
  tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  /* In case the insn wants input operands in modes different from
     the result, abort.  */
  if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
    abort ();

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
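
/* Illustrative note (added for exposition): most two-operand iWMMXt
   builtins reach this routine through the bdesc_2arg table consulted
   at the end of arm_expand_builtin, rather than through an explicit
   case there.  A packing call such as

     __builtin_arm_wpackhss (a, b)

   would be looked up by its ARM_BUILTIN_* code and expanded here with
   the corresponding CODE_FOR_iwmmxt_* insn code, its two arguments
   becoming op0/op1 after any predicate-driven copies into registers.  */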

/* Subroutine of arm_expand_builtin to take care of unop insns.  */

static rtx
arm_expand_unop_builtin (enum insn_code icode,
			 tree arglist, rtx target, int do_load)
{
  rtx pat;
  tree arg0 = TREE_VALUE (arglist);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);
  if (do_load)
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  else
    {
      if (VECTOR_MODE_P (mode0))
	op0 = safe_vector_operand (op0, mode0);

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
    }

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
arm_expand_builtin (tree exp,
		    rtx target,
		    rtx subtarget ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED,
		    int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description * d;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  tree arglist = TREE_OPERAND (exp, 1);
  tree arg0;
  tree arg1;
  tree arg2;
  rtx op0;
  rtx op1;
  rtx op2;
  rtx pat;
  int fcode = DECL_FUNCTION_CODE (fndecl);
  size_t i;
  enum machine_mode tmode;
  enum machine_mode mode0;
  enum machine_mode mode1;
  enum machine_mode mode2;

  switch (fcode)
    {
    case ARM_BUILTIN_TEXTRMSB:
    case ARM_BUILTIN_TEXTRMUB:
    case ARM_BUILTIN_TEXTRMSH:
    case ARM_BUILTIN_TEXTRMUH:
    case ARM_BUILTIN_TEXTRMSW:
    case ARM_BUILTIN_TEXTRMUW:
      icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
	       : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
	       : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
	       : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
	       : CODE_FOR_iwmmxt_textrmw);

      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
	{
	  /* @@@ better error message */
	  error ("selector must be an immediate");
	  return gen_reg_rtx (tmode);
	}
      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_TINSRB:
    case ARM_BUILTIN_TINSRH:
    case ARM_BUILTIN_TINSRW:
      icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
	       : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
	       : CODE_FOR_iwmmxt_tinsrw);
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
      op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;
      mode2 = insn_data[icode].operand[3].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
	op1 = copy_to_mode_reg (mode1, op1);
      if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
	{
	  /* @@@ better error message */
	  error ("selector must be an immediate");
	  return const0_rtx;
	}
      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1, op2);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_SETWCX:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
      emit_insn (gen_iwmmxt_tmcr (op1, op0));
      return 0;

    case ARM_BUILTIN_GETWCX:
      arg0 = TREE_VALUE (arglist);
      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
      target = gen_reg_rtx (SImode);
      emit_insn (gen_iwmmxt_tmrc (target, op0));
      return target;

    case ARM_BUILTIN_WSHUFH:
      icode = CODE_FOR_iwmmxt_wshufh;
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
      tmode = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
	op0 = copy_to_mode_reg (mode1, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
	{
	  /* @@@ better error message */
	  error ("mask must be an immediate");
	  return const0_rtx;
	}
      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_WSADB:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
    case ARM_BUILTIN_WSADH:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
    case ARM_BUILTIN_WSADBZ:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
    case ARM_BUILTIN_WSADHZ:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);

      /* Several three-argument builtins.  */
    case ARM_BUILTIN_WMACS:
    case ARM_BUILTIN_WMACU:
    case ARM_BUILTIN_WALIGN:
    case ARM_BUILTIN_TMIA:
    case ARM_BUILTIN_TMIAPH:
    case ARM_BUILTIN_TMIATT:
    case ARM_BUILTIN_TMIATB:
    case ARM_BUILTIN_TMIABT:
    case ARM_BUILTIN_TMIABB:
      icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
	       : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
	       : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
	       : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
	       : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
	       : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
	       : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
	       : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
	       : CODE_FOR_iwmmxt_walign);
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
      op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
      op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
      op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;
      mode2 = insn_data[icode].operand[3].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
	op1 = copy_to_mode_reg (mode1, op1);
      if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
	op2 = copy_to_mode_reg (mode2, op2);
      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1, op2);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_WZERO:
      target = gen_reg_rtx (DImode);
      emit_insn (gen_iwmmxt_clrdi (target));
      return target;

    default:
      break;
    }

  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == (const enum arm_builtins) fcode)
      return arm_expand_binop_builtin (d->icode, arglist, target);

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == (const enum arm_builtins) fcode)
      return arm_expand_unop_builtin (d->icode, arglist, target, 0);

  /* @@@ Should really do something sensible here.  */
  return NULL_RTX;
}
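
/* Illustrative note (added for exposition): the "must be an immediate"
   error paths above fire when a selector or mask operand fails the
   insn's match_operand predicate, e.g. (sketch only; names are
   hypothetical):

     int n = get_lane ();
     r = __builtin_arm_textrmsb (v, n);   -- error: selector must be an immediate
     r = __builtin_arm_textrmsb (v, 3);   -- OK, expands to a textrmsb insn  */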

/* Return the number (counting from 0) of
   the least significant set bit in MASK.  */

inline static int
number_of_first_bit_set (unsigned mask)
{
  int bit;

  for (bit = 0;
       (mask & (1 << bit)) == 0;
       ++bit)
    continue;

  return bit;
}
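
/* Worked example (added for exposition): for MASK == 0x28 (binary
   101000) the loop stops at bit 3, so the function returns 3.  For
   nonzero masks this is equivalent to the GCC builtin
   __builtin_ctz (mask); the loop would never terminate for
   MASK == 0, which callers here are careful to avoid.  */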

/* Emit code to push or pop registers to or from the stack.  F is the
   assembly file.  MASK is the registers to push or pop.  PUSH is
   nonzero if we should push, and zero if we should pop.  For debugging
   output, if pushing, adjust CFA_OFFSET by the amount of space added
   to the stack.  REAL_REGS should have the same number of bits set as
   MASK, and will be used instead (in the same order) to describe which
   registers were saved - this is used to mark the save slots when we
   push high registers after moving them to low registers.  */
static void
thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
	       unsigned long real_regs)
{
  int regno;
  int lo_mask = mask & 0xFF;
  int pushed_words = 0;

  if (mask == 0)
    abort ();

  if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
    {
      /* Special case.  Do not generate a POP PC statement here, do it in
	 thumb_exit().  */
      thumb_exit (f, -1);
      return;
    }

  fprintf (f, "\t%s\t{", push ? "push" : "pop");

  /* Look at the low registers first.  */
  for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
    {
      if (lo_mask & 1)
	{
	  asm_fprintf (f, "%r", regno);

	  if ((lo_mask & ~1) != 0)
	    fprintf (f, ", ");

	  pushed_words++;
	}
    }

  if (push && (mask & (1 << LR_REGNUM)))
    {
      /* Catch pushing the LR.  */
      if (mask & 0xFF)
	fprintf (f, ", ");

      asm_fprintf (f, "%r", LR_REGNUM);

      pushed_words++;
    }
  else if (!push && (mask & (1 << PC_REGNUM)))
    {
      /* Catch popping the PC.  */
      if (TARGET_INTERWORK || TARGET_BACKTRACE
	  || current_function_calls_eh_return)
	{
	  /* The PC is never popped directly, instead
	     it is popped into r3 and then BX is used.  */
	  fprintf (f, "}\n");

	  thumb_exit (f, -1);

	  return;
	}
      else
	{
	  if (mask & 0xFF)
	    fprintf (f, ", ");

	  asm_fprintf (f, "%r", PC_REGNUM);
	}
    }

  fprintf (f, "}\n");

  if (push && pushed_words && dwarf2out_do_frame ())
    {
      char *l = dwarf2out_cfi_label ();
      int pushed_mask = real_regs;

      *cfa_offset += pushed_words * 4;
      dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);

      pushed_words = 0;
      pushed_mask = real_regs;
      for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
	{
	  if (pushed_mask & 1)
	    dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
	}
    }
}
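
/* Illustrative output (added for exposition): a call such as
   thumb_pushpop (f, 0x40F0, 1, &cfa, 0x40F0) emits

	push	{r4, r5, r6, r7, lr}

   and advances *cfa_offset by 20 bytes, recording one dwarf2 save
   slot per bit set in REAL_REGS.  */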

/* Generate code to return from a thumb function.
   If 'reg_containing_return_addr' is -1, then the return address is
   actually on the stack, at the stack pointer.  */
static void
thumb_exit (FILE *f, int reg_containing_return_addr)
{
  unsigned regs_available_for_popping;
  unsigned regs_to_pop;
  int pops_needed;
  unsigned available;
  unsigned required;
  int mode;
  int size;
  int restore_a4 = FALSE;

  /* Compute the registers we need to pop.  */
  regs_to_pop = 0;
  pops_needed = 0;

  if (reg_containing_return_addr == -1)
    {
      regs_to_pop |= 1 << LR_REGNUM;
      ++pops_needed;
    }

  if (TARGET_BACKTRACE)
    {
      /* Restore the (ARM) frame pointer and stack pointer.  */
      regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
      pops_needed += 2;
    }

  /* If there is nothing to pop then just emit the BX instruction and
     return.  */
  if (pops_needed == 0)
    {
      if (current_function_calls_eh_return)
	asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);

      asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
      return;
    }
  /* Otherwise if we are not supporting interworking and we have not created
     a backtrace structure and the function was not entered in ARM mode then
     just pop the return address straight into the PC.  */
  else if (!TARGET_INTERWORK
	   && !TARGET_BACKTRACE
	   && !is_called_in_ARM_mode (current_function_decl)
	   && !current_function_calls_eh_return)
    {
      asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
      return;
    }

  /* Find out how many of the (return) argument registers we can corrupt.  */
  regs_available_for_popping = 0;

  /* If returning via __builtin_eh_return, the bottom three registers
     all contain information needed for the return.  */
  if (current_function_calls_eh_return)
    size = 12;
  else
    {
      /* Deduce the registers used from the function's return value
	 where possible.  This is more reliable than examining
	 regs_ever_live[] because that will be set if the register is
	 ever used in the function, not just if the register is used
	 to hold a return value.  */

      if (current_function_return_rtx != 0)
	mode = GET_MODE (current_function_return_rtx);
      else
	mode = DECL_MODE (DECL_RESULT (current_function_decl));

      size = GET_MODE_SIZE (mode);

      if (size == 0)
	{
	  /* In a void function we can use any argument register.
	     In a function that returns a structure on the stack
	     we can use the second and third argument registers.  */
	  if (mode == VOIDmode)
	    regs_available_for_popping =
	      (1 << ARG_REGISTER (1))
	      | (1 << ARG_REGISTER (2))
	      | (1 << ARG_REGISTER (3));
	  else
	    regs_available_for_popping =
	      (1 << ARG_REGISTER (2))
	      | (1 << ARG_REGISTER (3));
	}
      else if (size <= 4)
	regs_available_for_popping =
	  (1 << ARG_REGISTER (2))
	  | (1 << ARG_REGISTER (3));
      else if (size <= 8)
	regs_available_for_popping =
	  (1 << ARG_REGISTER (3));
    }

  /* Match registers to be popped with registers into which we pop them.  */
  for (available = regs_available_for_popping,
       required = regs_to_pop;
       required != 0 && available != 0;
       available &= ~(available & - available),
       required &= ~(required & - required))
    -- pops_needed;

  /* If we have any popping registers left over, remove them.  */
  if (available > 0)
    regs_available_for_popping &= ~available;

  /* Otherwise if we need another popping register we can use
     the fourth argument register.  */
  else if (pops_needed)
    {
      /* If we have not found any free argument registers and
	 reg a4 contains the return address, we must move it.  */
      if (regs_available_for_popping == 0
	  && reg_containing_return_addr == LAST_ARG_REGNUM)
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
	  reg_containing_return_addr = LR_REGNUM;
	}
      else if (size > 12)
	{
	  /* Register a4 is being used to hold part of the return value,
	     but we have dire need of a free, low register.  */
	  restore_a4 = TRUE;

	  asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
	}

      if (reg_containing_return_addr != LAST_ARG_REGNUM)
	{
	  /* The fourth argument register is available.  */
	  regs_available_for_popping |= 1 << LAST_ARG_REGNUM;

	  --pops_needed;
	}
    }

  /* Pop as many registers as we can.  */
  thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
		 regs_available_for_popping);

  /* Process the registers we popped.  */
  if (reg_containing_return_addr == -1)
    {
      /* The return address was popped into the lowest numbered register.  */
      regs_to_pop &= ~(1 << LR_REGNUM);

      reg_containing_return_addr =
	number_of_first_bit_set (regs_available_for_popping);

      /* Remove this register from the mask of available registers, so that
	 the return address will not be corrupted by further pops.  */
      regs_available_for_popping &= ~(1 << reg_containing_return_addr);
    }

  /* If we popped other registers then handle them here.  */
  if (regs_available_for_popping)
    {
      int frame_pointer;

      /* Work out which register currently contains the frame pointer.  */
      frame_pointer = number_of_first_bit_set (regs_available_for_popping);

      /* Move it into the correct place.  */
      asm_fprintf (f, "\tmov\t%r, %r\n",
		   ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);

      /* (Temporarily) remove it from the mask of popped registers.  */
      regs_available_for_popping &= ~(1 << frame_pointer);
      regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);

      if (regs_available_for_popping)
	{
	  int stack_pointer;

	  /* We popped the stack pointer as well,
	     find the register that contains it.  */
	  stack_pointer = number_of_first_bit_set (regs_available_for_popping);

	  /* Move it into the stack register.  */
	  asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);

	  /* At this point we have popped all necessary registers, so
	     do not worry about restoring regs_available_for_popping
	     to its correct value:

	     assert (pops_needed == 0)
	     assert (regs_available_for_popping == (1 << frame_pointer))
	     assert (regs_to_pop == (1 << STACK_POINTER))  */
	}
      else
	{
	  /* Since we have just moved the popped value into the frame
	     pointer, the popping register is available for reuse, and
	     we know that we still have the stack pointer left to pop.  */
	  regs_available_for_popping |= (1 << frame_pointer);
	}
    }

  /* If we still have registers left on the stack, but we no longer have
     any registers into which we can pop them, then we must move the return
     address into the link register and make available the register that
     contained it.  */
  if (regs_available_for_popping == 0 && pops_needed > 0)
    {
      regs_available_for_popping |= 1 << reg_containing_return_addr;

      asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
		   reg_containing_return_addr);

      reg_containing_return_addr = LR_REGNUM;
    }

  /* If we have registers left on the stack then pop some more.
     We know that at most we will want to pop FP and SP.  */
  if (pops_needed > 0)
    {
      int popped_into;
      int move_to;

      thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
		     regs_available_for_popping);

      /* We have popped either FP or SP.
	 Move whichever one it is into the correct register.  */
      popped_into = number_of_first_bit_set (regs_available_for_popping);
      move_to = number_of_first_bit_set (regs_to_pop);

      asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);

      regs_to_pop &= ~(1 << move_to);

      --pops_needed;
    }

  /* If we still have not popped everything then we must have only
     had one register available to us and we are now popping the SP.  */
  if (pops_needed > 0)
    {
      int popped_into;

      thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
		     regs_available_for_popping);

      popped_into = number_of_first_bit_set (regs_available_for_popping);

      asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
      /*
	assert (regs_to_pop == (1 << STACK_POINTER))
	assert (pops_needed == 1)
      */
    }

  /* If necessary restore the a4 register.  */
  if (restore_a4)
    {
      if (reg_containing_return_addr != LR_REGNUM)
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
	  reg_containing_return_addr = LR_REGNUM;
	}

      asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
    }

  if (current_function_calls_eh_return)
    asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);

  /* Return to caller.  */
  asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
}
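
/* Illustrative output (added for exposition): for an interworked void
   function whose return address is on the stack, the matching loop
   above typically pairs LR with the first free argument register, so
   the emitted epilogue tail looks like

	pop	{r0}
	bx	r0

   rather than popping directly into the PC.  */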

void
thumb_final_prescan_insn (rtx insn)
{
  if (flag_print_asm_name)
    asm_fprintf (asm_out_file, "%@ 0x%04x\n",
		 INSN_ADDRESSES (INSN_UID (insn)));
}

int
thumb_shiftable_const (unsigned HOST_WIDE_INT val)
{
  unsigned HOST_WIDE_INT mask = 0xff;
  int i;

  if (val == 0) /* XXX */
    return 0;

  for (i = 0; i < 25; i++)
    if ((val & (mask << i)) == val)
      return 1;

  return 0;
}
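
/* Worked example (added for exposition): 0x0003FC00 equals 0xff << 10,
   so all of its set bits fit one eight-bit window and the function
   returns 1 - the constant can be built as a move of 0xff followed by
   a left shift.  0x00100101 has set bits at positions 0, 8 and 20,
   which no eight-bit window covers, so the function returns 0.  */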

/* Returns nonzero if the current function contains,
   or might contain a far jump.  */
static int
thumb_far_jump_used_p (void)
{
  rtx insn;

  /* This test is only important for leaf functions.  */
  /* assert (!leaf_function_p ()); */

  /* If we have already decided that far jumps may be used,
     do not bother checking again, and always return true even if
     it turns out that they are not being used.  Once we have made
     the decision that far jumps are present (and that hence the link
     register will be pushed onto the stack) we cannot go back on it.  */
  if (cfun->machine->far_jump_used)
    return 1;

  /* If this function is not being called from the prologue/epilogue
     generation code then it must be being called from the
     INITIAL_ELIMINATION_OFFSET macro.  */
  if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
    {
      /* In this case we know that we are being asked about the elimination
	 of the arg pointer register.  If that register is not being used,
	 then there are no arguments on the stack, and we do not have to
	 worry that a far jump might force the prologue to push the link
	 register, changing the stack offsets.  In this case we can just
	 return false, since the presence of far jumps in the function will
	 not affect stack offsets.

	 If the arg pointer is live (or if it was live, but has now been
	 eliminated and so set to dead) then we do have to test to see if
	 the function might contain a far jump.  This test can lead to some
	 false negatives, since before reload is completed, the length of
	 branch instructions is not known, so gcc defaults to returning their
	 longest length, which in turn sets the far jump attribute to true.

	 A false negative will not result in bad code being generated, but it
	 will result in a needless push and pop of the link register.  We
	 hope that this does not occur too often.

	 If we need doubleword stack alignment this could affect the other
	 elimination offsets so we can't risk getting it wrong.  */
      if (regs_ever_live [ARG_POINTER_REGNUM])
	cfun->machine->arg_pointer_live = 1;
      else if (!cfun->machine->arg_pointer_live)
	return 0;
    }

  /* Check to see if the function contains a branch
     insn with the far jump attribute set.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN
	  /* Ignore tablejump patterns.  */
	  && GET_CODE (PATTERN (insn)) != ADDR_VEC
	  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
	  && get_attr_far_jump (insn) == FAR_JUMP_YES)
	{
	  /* Record the fact that we have decided that
	     the function does use far jumps.  */
	  cfun->machine->far_jump_used = 1;
	  return 1;
	}
    }

  return 0;
}

/* Return nonzero if FUNC must be entered in ARM mode.  */
int
is_called_in_ARM_mode (tree func)
{
  if (TREE_CODE (func) != FUNCTION_DECL)
    abort ();

  /* Ignore the problem about functions whose address is taken.  */
  if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
    return TRUE;

#ifdef ARM_PE
  return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
#else
  return FALSE;
#endif
}

/* The bits which aren't usefully expanded as rtl.  */
const char *
thumb_unexpanded_epilogue (void)
{
  int regno;
  unsigned long live_regs_mask = 0;
  int high_regs_pushed = 0;
  int had_to_push_lr;
  int size;
  int mode;

  if (return_used_this_function)
    return "";

  if (IS_NAKED (arm_current_func_type ()))
    return "";

  live_regs_mask = thumb_compute_save_reg_mask ();
  high_regs_pushed = bit_count (live_regs_mask & 0x0f00);

  /* Deduce the registers used from the function's return value where
     possible.  This is more reliable than examining regs_ever_live[]
     because that will be set if the register is ever used in the
     function, not just if the register is used to hold a return
     value.  */

  if (current_function_return_rtx != 0)
    mode = GET_MODE (current_function_return_rtx);
  else
    mode = DECL_MODE (DECL_RESULT (current_function_decl));

  size = GET_MODE_SIZE (mode);

  /* The prologue may have pushed some high registers to use as
     work registers.  e.g. the testsuite file:
     gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
     compiles to produce:
	push	{r4, r5, r6, r7, lr}
	mov	r7, r9
	mov	r6, r8
	push	{r6, r7}
     as part of the prologue.  We have to undo that pushing here.  */

  if (high_regs_pushed)
    {
      unsigned long mask = live_regs_mask & 0xff;
      int next_hi_reg;

      /* The available low registers depend on the size of the value we are
	 returning.  */
      if (size <= 12)
	mask |= 1 << 3;
      if (size <= 8)
	mask |= 1 << 2;

      if (mask == 0)
	/* Oh dear!  We have no low registers into which we can pop
	   high registers!  */
	internal_error
	  ("no low registers available for popping high registers");

      for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
	if (live_regs_mask & (1 << next_hi_reg))
	  break;

      while (high_regs_pushed)
	{
	  /* Find lo register(s) into which the high register(s) can
	     be popped.  */
	  for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
	    {
	      if (mask & (1 << regno))
		high_regs_pushed--;
	      if (high_regs_pushed == 0)
		break;
	    }

	  mask &= (2 << regno) - 1;	/* A noop if regno == 8 */

	  /* Pop the values into the low register(s).  */
	  thumb_pushpop (asm_out_file, mask, 0, NULL, mask);

	  /* Move the value(s) into the high registers.  */
	  for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
	    {
	      if (mask & (1 << regno))
		{
		  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
			       regno);

		  for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
		    if (live_regs_mask & (1 << next_hi_reg))
		      break;
		}
	    }
	}
      live_regs_mask &= ~0x0f00;
    }

  had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
  live_regs_mask &= 0xff;

  if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
    {
      /* Pop the return address into the PC.  */
      if (had_to_push_lr)
	live_regs_mask |= 1 << PC_REGNUM;

      /* Either no argument registers were pushed or a backtrace
	 structure was created which includes an adjusted stack
	 pointer, so just pop everything.  */
      if (live_regs_mask)
	thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
		       live_regs_mask);

      /* We have either just popped the return address into the
	 PC or it was kept in LR for the entire function.  */
      if (!had_to_push_lr)
	thumb_exit (asm_out_file, LR_REGNUM);
    }
  else
    {
      /* Pop everything but the return address.  */
      if (live_regs_mask)
	thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
		       live_regs_mask);

      if (had_to_push_lr)
	{
	  if (size > 12)
	    {
	      /* We have no free low regs, so save one.  */
	      asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
			   LAST_ARG_REGNUM);
	    }

	  /* Get the return address into a temporary register.  */
	  thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
			 1 << LAST_ARG_REGNUM);

	  if (size > 12)
	    {
	      /* Move the return address to lr.  */
	      asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
			   LAST_ARG_REGNUM);
	      /* Restore the low register.  */
	      asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
			   IP_REGNUM);
	      regno = LR_REGNUM;
	    }
	  else
	    regno = LAST_ARG_REGNUM;
	}
      else
	regno = LR_REGNUM;

      /* Remove the argument registers that were pushed onto the stack.  */
      asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
		   SP_REGNUM, SP_REGNUM,
		   current_function_pretend_args_size);

      thumb_exit (asm_out_file, regno);
    }

  return "";
}
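
/* Illustrative output (added for exposition; the exact registers
   depend on the live mask): for a function whose prologue saved r8
   and r9 through low work registers, the restore loop above emits a
   sequence along these lines before the return-address handling:

	pop	{r2, r3}
	mov	r8, r2
	mov	r9, r3  */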

/* Functions to save and restore machine-specific function data.  */
static struct machine_function *
arm_init_machine_status (void)
{
  struct machine_function *machine;
  machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));

#if ARM_FT_UNKNOWN != 0
  machine->func_type = ARM_FT_UNKNOWN;
#endif
  return machine;
}

/* Return an RTX indicating where the return address to the
   calling function can be found.  */
rtx
arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL_RTX;

  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
}

/* Do anything needed before RTL is emitted for each function.  */
void
arm_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = arm_init_machine_status;

  /* This is to stop the combine pass optimizing away the alignment
     adjustment of va_arg.  */
  /* ??? It is claimed that this should not be necessary.  */
  if (cfun)
    mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
}

/* Like arm_compute_initial_elimination_offset.  Simpler because
   THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer.  */

HOST_WIDE_INT
thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
{
  arm_stack_offsets *offsets;

  offsets = arm_get_frame_offsets ();

  switch (from)
    {
    case ARG_POINTER_REGNUM:
      switch (to)
	{
	case STACK_POINTER_REGNUM:
	  return offsets->outgoing_args - offsets->saved_args;

	case FRAME_POINTER_REGNUM:
	  return offsets->soft_frame - offsets->saved_args;

	case THUMB_HARD_FRAME_POINTER_REGNUM:
	case ARM_HARD_FRAME_POINTER_REGNUM:
	  return offsets->saved_regs - offsets->saved_args;

	default:
	  abort ();
	}
      break;

    case FRAME_POINTER_REGNUM:
      switch (to)
	{
	case STACK_POINTER_REGNUM:
	  return offsets->outgoing_args - offsets->soft_frame;

	case THUMB_HARD_FRAME_POINTER_REGNUM:
	case ARM_HARD_FRAME_POINTER_REGNUM:
	  return offsets->saved_regs - offsets->soft_frame;

	default:
	  abort ();
	}
      break;

    default:
      abort ();
    }
}
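
/* Worked example (added for exposition; the numbers are invented):
   with offsets->saved_args == 0, saved_regs == 16, soft_frame == 20
   and outgoing_args == 28, eliminating ARG_POINTER_REGNUM yields 28
   towards STACK_POINTER_REGNUM, 20 towards FRAME_POINTER_REGNUM and
   16 towards the hard frame pointer; eliminating FRAME_POINTER_REGNUM
   into STACK_POINTER_REGNUM yields 28 - 20 = 8.  */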

/* Generate the rest of a function's prologue.  */
void
thumb_expand_prologue (void)
{
  rtx insn, dwarf;

  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  unsigned long func_type;
  int regno;
  unsigned long live_regs_mask;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))
    return;

  if (IS_INTERRUPT (func_type))
    {
      error ("interrupt service routines cannot be coded in Thumb mode");
      return;
    }

  live_regs_mask = thumb_compute_save_reg_mask ();
  /* Load the pic register before setting the frame pointer,
     so we can use r7 as a temporary work register.  */
  if (flag_pic)
    arm_load_pic_register (thumb_find_work_register (live_regs_mask));

  offsets = arm_get_frame_offsets ();

  if (frame_pointer_needed)
    {
      insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
				   stack_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
    emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
		    stack_pointer_rtx);

  amount = offsets->outgoing_args - offsets->saved_regs;
  if (amount)
    {
      if (amount < 512)
	{
	  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
					GEN_INT (- amount)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	{
	  rtx reg;

	  /* The stack decrement is too big for an immediate value in a single
	     insn.  In theory we could issue multiple subtracts, but after
	     three of them it becomes more space efficient to place the full
	     value in the constant pool and load into a register.  (Also the
	     ARM debugger really likes to see only one stack decrement per
	     function).  So instead we look for a scratch register into which
	     we can load the decrement, and then we subtract this from the
	     stack pointer.  Unfortunately on the thumb the only available
	     scratch registers are the argument registers, and we cannot use
	     these as they may hold arguments to the function.  Instead we
	     attempt to locate a call preserved register which is used by this
	     function.  If we can find one, then we know that it will have
	     been pushed at the start of the prologue and so we can corrupt
	     it now.  */
	  for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
	    if (live_regs_mask & (1 << regno)
		&& !(frame_pointer_needed
		     && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
	      break;

	  if (regno > LAST_LO_REGNUM) /* Very unlikely.  */
	    {
	      rtx spare = gen_rtx_REG (SImode, IP_REGNUM);

	      /* Choose an arbitrary, non-argument low register.  */
	      reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);

	      /* Save it by copying it into a high, scratch register.  */
	      emit_insn (gen_movsi (spare, reg));
	      /* Add a USE to stop propagate_one_insn() from barfing.  */
	      emit_insn (gen_prologue_use (spare));

	      /* Decrement the stack.  */
	      emit_insn (gen_movsi (reg, GEN_INT (- amount)));
	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					    stack_pointer_rtx, reg));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
				   plus_constant (stack_pointer_rtx,
						  -amount));
	      RTX_FRAME_RELATED_P (dwarf) = 1;
	      REG_NOTES (insn)
		= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
				     REG_NOTES (insn));

	      /* Restore the low register's original value.  */
	      emit_insn (gen_movsi (reg, spare));

	      /* Emit a USE of the restored scratch register, so that flow
		 analysis will not consider the restore redundant.  The
		 register won't be used again in this function and isn't
		 restored by the epilogue.  */
	      emit_insn (gen_prologue_use (reg));
	    }
	  else
	    {
	      reg = gen_rtx_REG (SImode, regno);

	      emit_insn (gen_movsi (reg, GEN_INT (- amount)));

	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					    stack_pointer_rtx, reg));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
				   plus_constant (stack_pointer_rtx,
						  -amount));
	      RTX_FRAME_RELATED_P (dwarf) = 1;
	      REG_NOTES (insn)
		= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
				     REG_NOTES (insn));
	    }
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      if (frame_pointer_needed)
	emit_insn (gen_stack_tie (stack_pointer_rtx,
				  hard_frame_pointer_rtx));
    }

  if (current_function_profile || TARGET_NO_SCHED_PRO)
    emit_insn (gen_blockage ());

  cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
  if (live_regs_mask & 0xff)
    cfun->machine->lr_save_eliminated = 0;

  /* If the link register is being kept alive, with the return address in it,
     then make sure that it does not get reused by the ce2 pass.  */
  if (cfun->machine->lr_save_eliminated)
    emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
}
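
/* Illustrative output (added for exposition): for a 1024-byte frame,
   too large for the immediate field of a single SUB, the scratch
   register path above produces code along these lines, assuming r4
   was already saved and is free:

	ldr	r4, .Lc		@ .Lc: .word -1024
	add	sp, sp, r4

   while frames under 512 bytes get a single "sub sp, #amount".  */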

void
thumb_expand_epilogue (void)
{
  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  int regno;

  /* Naked functions don't have epilogues.  */
  if (IS_NAKED (arm_current_func_type ()))
    return;

  offsets = arm_get_frame_offsets ();
  amount = offsets->outgoing_args - offsets->saved_regs;

  if (frame_pointer_needed)
    emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
  else if (amount)
    {
      if (amount < 512)
	emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (amount)));
      else
	{
	  /* r3 is always free in the epilogue.  */
	  rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);

	  emit_insn (gen_movsi (reg, GEN_INT (amount)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
	}
    }

  /* Emit a USE (stack_pointer_rtx), so that
     the stack adjustment will not be deleted.  */
  emit_insn (gen_prologue_use (stack_pointer_rtx));

  if (current_function_profile || TARGET_NO_SCHED_PRO)
    emit_insn (gen_blockage ());

  /* Emit a clobber for each insn that will be restored in the epilogue,
     so that flow2 will get register lifetimes correct.  */
  for (regno = 0; regno < 13; regno++)
    if (regs_ever_live[regno] && !call_used_regs[regno])
      emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));

  if (! regs_ever_live[LR_REGNUM])
    emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
}

static void
thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  unsigned long live_regs_mask = 0;
  unsigned long l_mask;
  unsigned high_regs_pushed = 0;
  int cfa_offset = 0;
  int regno;

  if (IS_NAKED (arm_current_func_type ()))
    return;

  if (is_called_in_ARM_mode (current_function_decl))
    {
      const char * name;

      if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
	abort ();
      if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
	abort ();
      name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      /* Generate code sequence to switch us into Thumb mode.  */
      /* The .code 32 directive has already been emitted by
	 ASM_DECLARE_FUNCTION_NAME.  */
      asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
      asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);

      /* Generate a label, so that the debugger will notice the
	 change in instruction sets.  This label is also used by
	 the assembler to bypass the ARM code when this function
	 is called from a Thumb encoded function elsewhere in the
	 same file.  Hence the definition of STUB_NAME here must
	 agree with the definition in gas/config/tc-arm.c.  */

#define STUB_NAME ".real_start_of"

      fprintf (f, "\t.code\t16\n");
#ifdef ARM_PE
      if (arm_dllexport_name_p (name))
	name = arm_strip_name_encoding (name);
#endif
      asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
      fprintf (f, "\t.thumb_func\n");
      asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
    }

  if (current_function_pretend_args_size)
    {
      if (cfun->machine->uses_anonymous_args)
	{
	  int num_pushes;

	  fprintf (f, "\tpush\t{");

	  num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);

	  for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
	       regno <= LAST_ARG_REGNUM;
	       regno++)
	    asm_fprintf (f, "%r%s", regno,
			 regno == LAST_ARG_REGNUM ? "" : ", ");

	  fprintf (f, "}\n");
	}
      else
	asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
		     SP_REGNUM, SP_REGNUM,
		     current_function_pretend_args_size);

      /* We don't need to record the stores for unwinding (would it
	 help the debugger any if we did?), but record the change in
	 the stack pointer.  */
      if (dwarf2out_do_frame ())
	{
	  char *l = dwarf2out_cfi_label ();

	  cfa_offset = cfa_offset + current_function_pretend_args_size;
	  dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
	}
    }

  /* Get the registers we are going to push.  */
  live_regs_mask = thumb_compute_save_reg_mask ();
  /* Extract a mask of the ones we can give to the Thumb's push instruction.  */
  l_mask = live_regs_mask & 0x40ff;
  /* Then count how many other high registers will need to be pushed.  */
  high_regs_pushed = bit_count (live_regs_mask & 0x0f00);

  if (TARGET_BACKTRACE)
    {
      unsigned offset;
      unsigned work_register;

      /* We have been asked to create a stack backtrace structure.
	 The code looks like this:

	 0   .align 2
	 0   func:
	 0     sub   SP, #16         Reserve space for 4 registers.
	 2     push  {R7}            Push low registers.
	 4     add   R7, SP, #20     Get the stack pointer before the push.
	 6     str   R7, [SP, #8]    Store the stack pointer (before reserving the space).
	 8     mov   R7, PC          Get hold of the start of this code plus 12.
	 10    str   R7, [SP, #16]   Store it.
	 12    mov   R7, FP          Get hold of the current frame pointer.
	 14    str   R7, [SP, #4]    Store it.
	 16    mov   R7, LR          Get hold of the current return address.
	 18    str   R7, [SP, #12]   Store it.
	 20    add   R7, SP, #16     Point at the start of the backtrace structure.
	 22    mov   FP, R7          Put this value into the frame pointer.  */

      work_register = thumb_find_work_register (live_regs_mask);

      asm_fprintf
	(f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
	 SP_REGNUM, SP_REGNUM);

      if (dwarf2out_do_frame ())
	{
	  char *l = dwarf2out_cfi_label ();

	  cfa_offset = cfa_offset + 16;
	  dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
	}

      if (l_mask)
	{
	  thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
	  offset = bit_count (l_mask);
	}
      else
	offset = 0;

      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
		   offset + 16 + current_function_pretend_args_size);

      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		   offset + 4);

      /* Make sure that the instruction fetching the PC is in the right place
	 to calculate "start of backtrace creation code + 12".  */
      if (l_mask)
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset + 12);
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
		       ARM_HARD_FRAME_POINTER_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset);
	}
      else
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
		       ARM_HARD_FRAME_POINTER_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset);
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset + 12);
	}

      asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		   offset + 8);
      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
		   offset + 12);
      asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
		   ARM_HARD_FRAME_POINTER_REGNUM, work_register);
    }
  /* Optimisation:  If we are not pushing any low registers but we are going
     to push some high registers then delay our first push.  This will just
     be a push of LR and we can combine it with the push of the first high
     register.  */
  else if ((l_mask & 0xff) != 0
	   || (high_regs_pushed == 0 && l_mask))
    thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);

  if (high_regs_pushed)
    {
      unsigned pushable_regs;
      unsigned next_hi_reg;

      for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
	if (live_regs_mask & (1 << next_hi_reg))
	  break;

      pushable_regs = l_mask & 0xff;

      if (pushable_regs == 0)
	pushable_regs = 1 << thumb_find_work_register (live_regs_mask);

      while (high_regs_pushed > 0)
	{
	  unsigned long real_regs_mask = 0;

	  for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
	    {
	      if (pushable_regs & (1 << regno))
		{
		  asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);

		  high_regs_pushed --;
		  real_regs_mask |= (1 << next_hi_reg);

		  if (high_regs_pushed)
		    {
		      for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
			   next_hi_reg --)
			if (live_regs_mask & (1 << next_hi_reg))
			  break;
		    }
		  else
		    {
		      pushable_regs &= ~((1 << regno) - 1);
		      break;
		    }
		}
	    }

	  /* If we had to find a work register and we have not yet
	     saved the LR then add it to the list of regs to push.  */
	  if (l_mask == (1 << LR_REGNUM))
	    {
	      thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
			     1, &cfa_offset,
			     real_regs_mask | (1 << LR_REGNUM));
	      l_mask = 0;
	    }
	  else
	    thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
	}
    }
}

/* Handle the case of a double word load into a low register from
   a computed memory address.  The computed address may involve a
   register which is overwritten by the load.  */
const char *
thumb_load_double_from_address (rtx *operands)
{
  rtx addr;
  rtx base;
  rtx offset;
  rtx arg1;
  rtx arg2;

  if (GET_CODE (operands[0]) != REG)
    abort ();

  if (GET_CODE (operands[1]) != MEM)
    abort ();

  /* Get the memory address.  */
  addr = XEXP (operands[1], 0);

  /* Work out how the memory address is computed.  */
  switch (GET_CODE (addr))
    {
    case REG:
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      if (REGNO (operands[0]) == REGNO (addr))
	{
	  output_asm_insn ("ldr\t%H0, %2", operands);
	  output_asm_insn ("ldr\t%0, %1", operands);
	}
      else
	{
	  output_asm_insn ("ldr\t%0, %1", operands);
	  output_asm_insn ("ldr\t%H0, %2", operands);
	}
      break;

    case CONST:
      /* Compute <address> + 4 for the high order load.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%0, %1", operands);
      output_asm_insn ("ldr\t%H0, %2", operands);
      break;

    case PLUS:
      arg1 = XEXP (addr, 0);
      arg2 = XEXP (addr, 1);

      if (CONSTANT_P (arg1))
	base = arg2, offset = arg1;
      else
	base = arg1, offset = arg2;

      if (GET_CODE (base) != REG)
	abort ();

      /* Catch the case of <address> = <reg> + <reg>  */
      if (GET_CODE (offset) == REG)
	{
	  int reg_offset = REGNO (offset);
	  int reg_base = REGNO (base);
	  int reg_dest = REGNO (operands[0]);

	  /* Add the base and offset registers together into the
	     higher destination register.  */
	  asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
		       reg_dest + 1, reg_base, reg_offset);

	  /* Load the lower destination register from the address in
	     the higher destination register.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
		       reg_dest, reg_dest + 1);

	  /* Load the higher destination register from its own address
	     plus 4.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
		       reg_dest + 1, reg_dest + 1);
	}
      else
	{
	  /* Compute <address> + 4 for the high order load.  */
	  operands[2] = gen_rtx_MEM (SImode,
				     plus_constant (XEXP (operands[1], 0), 4));

	  /* If the computed address is held in the low order register
	     then load the high order register first, otherwise always
	     load the low order register first.  */
	  if (REGNO (operands[0]) == REGNO (base))
	    {
	      output_asm_insn ("ldr\t%H0, %2", operands);
	      output_asm_insn ("ldr\t%0, %1", operands);
	    }
	  else
	    {
	      output_asm_insn ("ldr\t%0, %1", operands);
	      output_asm_insn ("ldr\t%H0, %2", operands);
	    }
	}
      break;

    case LABEL_REF:
      /* With no registers to worry about we can just load the value
	 directly.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%H0, %2", operands);
      output_asm_insn ("ldr\t%0, %1", operands);
      break;

    default:
      abort ();
      break;
    }

  return "";
}
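
/* Illustrative output (added for exposition): for the reg+reg case,
   loading the double word at [r1 + r2] into r3/r4 becomes

	add	r4, r1, r2
	ldr	r3, [r4, #0]
	ldr	r4, [r4, #4]

   computing the address into the high half of the destination first
   so that the low load cannot clobber the base before it is used.  */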

const char *
thumb_output_move_mem_multiple (int n, rtx *operands)
{
  rtx tmp;

  switch (n)
    {
    case 2:
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
      break;

    case 3:
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      if (REGNO (operands[5]) > REGNO (operands[6]))
	{
	  tmp = operands[5];
	  operands[5] = operands[6];
	  operands[6] = tmp;
	}
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}

      output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
      break;

    default:
      abort ();
    }

  return "";
}
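
/* Illustrative note (added for exposition): the register swaps above
   exist because ldmia/stmia transfer registers in ascending numerical
   order regardless of how they are listed, so the operands must be
   sorted before the register list is printed.  A 3-word move whose
   scratch operands arrive as (r5, r3, r4) is reordered by the three
   compare-and-swap steps so that the emitted "ldmia r1!, {r3, r4, r5}"
   matches the memory layout.  */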

/* Output a call-via instruction for thumb state.  */
const char *
thumb_call_via_reg (rtx reg)
{
  int regno = REGNO (reg);
  rtx *labelp;

  gcc_assert (regno < LR_REGNUM);

  /* If we are in the normal text section we can use a single instance
     per compilation unit.  If we are doing function sections, then we need
     an entry per section, since we can't rely on reachability.  */
  if (in_text_section ())
    {
      thumb_call_reg_needed = 1;

      if (thumb_call_via_label[regno] == NULL)
	thumb_call_via_label[regno] = gen_label_rtx ();
      labelp = thumb_call_via_label + regno;
    }
  else
    {
      if (cfun->machine->call_via[regno] == NULL)
	cfun->machine->call_via[regno] = gen_label_rtx ();
      labelp = cfun->machine->call_via + regno;
    }

  output_asm_insn ("bl\t%a0", labelp);
  return "";
}
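
/* Illustrative output (added for exposition; the label spelling is
   schematic - it is whatever gen_label_rtx produced): each call
   through, say, r4 emits a short branch-and-link to a shared stub,
   and arm_file_end later materializes that stub once:

	bl	.L<n>		@ at each call site
	...
   .L<n>:
	bx	r4		@ emitted by arm_file_end  */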
/* Routines for generating rtl.  */
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in  = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;

  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
			    reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
			    reg));
    }
}
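/* Illustrative only: a 23 byte copy is decomposed as 12 + 8 + 2 + 1,
   i.e. one movmem12b block move, one movmem8b block move, then a
   halfword and a byte move.  The block-move patterns post-increment
   IN and OUT themselves, so OFFSET only tracks the tail moves.  */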
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}
/* Return the length of a function name prefix
   that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
      ARM_NAME_ENCODING_LENGTHS
    default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
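/* Illustrative only: with '*' among the recognized prefix characters
   (see arm_asm_output_labelref below, which treats it as "emit
   verbatim"), a name such as "*foo" would be returned as "foo".  The
   exact prefix set comes from ARM_NAME_ENCODING_LENGTHS and varies by
   target.  */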
/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
static void
arm_file_end (void)
{
  int regno;

  if (! thumb_call_reg_needed)
    return;

  text_section ();
  asm_fprintf (asm_out_file, "\t.code 16\n");
  ASM_OUTPUT_ALIGN (asm_out_file, 1);

  for (regno = 0; regno < LR_REGNUM; regno++)
    {
      rtx label = thumb_call_via_label[regno];

      if (label != 0)
	{
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (label));
	  asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
	}
    }
}
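/* Illustrative only: if calls via r3 were needed, the end of the text
   section would contain roughly (assuming ELF-style local labels)

       .code 16
       .align  1
   .L99:
       bx      r3

   and every "bl .L99" emitted by thumb_call_via_reg lands here.  */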
rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
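/* Illustrative only: the chain assigns each distinct symbol a fixed
   4-byte slot in the x$adcons pool, so the first symbol resolves to
   x$adcons + 0, the second to x$adcons + 4, and so on.  Repeated
   lookups of the same symbol (compared by pointer via XSTR) return the
   same offset.  */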
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}
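/* Illustrative only: with "foo" then "bar" on the chain, and assuming
   the PIC register prints as sl, the dumped table would look roughly
   like

       AREA |sl$$adcons|, BASED sl
   |x$adcons|
       DCD     foo
       DCD     bar
 */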
int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}
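/* Illustrative only: a typical lifecycle, assuming "foo" is first
   referenced and later defined in the same file:

       aof_add_import ("foo");       (forward reference seen)
       ...
       aof_delete_import ("foo");    (definition seen, import cancelled)

   Anything still on imports_list when aof_dump_imports runs is emitted
   as an IMPORT directive.  Note that names are compared by pointer,
   which relies on the identifier strings being shared.  */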
int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}
static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  arm_file_end ();
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);
  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
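/* Illustrative only: the loop above splits DELTA into add/sub
   immediates that each fit an ARM 8-bit-rotated operand.  For example,
   delta == 0x1234 with "this" in r0 would emit roughly

       add     r0, r0, #564     @ 0x234, bits 2..9
       add     r0, r0, #4096    @ 0x1000, bit 12

   each step consuming one aligned 8-bit field of the remaining
   delta.  */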
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
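/* Illustrative only: a V4HImode constant with elements {1, 2, 3, 4}
   (element 0 first) is printed highest element first, producing
   "0x0004000300020001".  */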
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
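/* Illustrative only: for an in-range offset the single "wldrw" form is
   used directly.  The expansion path above handles offsets outside the
   +/-1023 range by spilling the base register to the stack, loading the
   value into it with a plain "ldr", transferring it to the wCGR
   register with "tmcr", and then restoring the base.  */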
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which the address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
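/* Illustrative only, assuming the usual four argument registers: for a
   variadic function whose named arguments consume just r0
   (cum->nregs == 1), *pretend_size becomes 3 * UNITS_PER_WORD == 12,
   so the prologue also pushes r1-r3 and the anonymous arguments end up
   contiguous on the stack.  */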
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
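/* Illustrative only: for the pair

       mul     r1, r2, r3      @ producer: result is r1
       str     r1, [r4]        @ consumer: address uses only r4

   the store's address does not mention r1, so this returns nonzero and
   the scheduler may apply the shorter store-data bypass latency.  */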
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}
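/* Illustrative only: for

       ldr     r1, [r5]                @ producer
       add     r0, r2, r1, lsl #2      @ consumer: r1 feeds the shifter

   the shifted operand mentions r1, so this returns zero and the full
   early-shift latency applies; if r3 were shifted instead, it would
   return nonzero.  */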
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
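/* Illustrative only: for a multiply-accumulate consumer

       mla     r0, r2, r3, r1          @ rtl: (plus (mult r2 r3) r1)

   a producer writing r1 (the addend) returns nonzero, since only the
   multiplication operands are needed early; a producer writing r2 or
   r3 returns zero.  */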
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}
/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}

/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}
/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}

/* The EABI says to test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}
/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}

/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}
/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}
static void
arm_cxx_determine_class_data_visibility (tree decl)
{
  if (!TARGET_AAPCS_BASED)
    return;

  /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
     is exported.  However, on systems without dynamic vague linkage,
     \S 3.2.5.6 says that COMDAT class data has hidden linkage.  */
  if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;
}

static bool
arm_cxx_class_data_always_comdat (void)
{
  /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
     vague linkage if the class has no key function.  */
  return !TARGET_AAPCS_BASED;
}
void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
	  || delta >= 1024)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
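/* Illustrative only: a mask of 255 tells the middle end that an SImode
   shift by, say, 257 behaves like a shift by 1 (257 & 255 == 1), so the
   count may be folded; the mask of 0 promises nothing for DImode.  */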
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    return 64 + regno - FIRST_VFP_REGNUM;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  abort ();
}
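/* Illustrative only: core registers map straight through (r0-r15 are
   DWARF 0-15), the first VFP register becomes DWARF 64, and on AAPCS
   targets the first FPA register becomes DWARF 96 rather than the
   legacy 16.  */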