gcc/config/arm/arm.c
/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned long thumb_compute_save_reg_mask (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);

#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);

#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static bool arm_cxx_export_class_data (void);
static void arm_init_libfuncs (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_EXPORT_CLASS_DATA
#define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the legacy -mhard-float and -msoft-float options.  */
const char * target_float_switch = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[13];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
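
/* Editorial note (not in the original source): each FL_FOR_ARCHxx macro
   accumulates the flags of the architectures it extends, so, for example,
   FL_FOR_ARCH5TE expands to

     (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB)

   which is why a test such as (insn_flags & FL_ARCH4) succeeds for every
   v4-or-later architecture selection.  */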
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;
/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
  {NULL, arm_none, NULL, 0 , NULL}
};
/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};
/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
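
/* Worked example (editorial note, not in the original source): the loop
   above relies on the identity that VALUE & (VALUE - 1) clears exactly the
   least-significant set bit, so the number of iterations equals the
   population count.  For value == 0x29 (binary 101001):

     0x29 -> 0x28 -> 0x20 -> 0x00

   giving bit_count (0x29) == 3.  */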
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}
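
/* Editorial sketch (not in the original source): because the quotient is
   returned in the normal result location, the divmod routines registered
   above double as plain division routines.  A function such as

     int quotient (int n, int d) { return n / d; }

   therefore compiles to a call to __aeabi_idivmod on a BPABI target; the
   quotient comes back in r0, and the remainder left in r1 is simply
   ignored.  */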
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();
  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;
  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else if (target_float_switch)
    {
      /* This is a bit of a hack to avoid needing target flags for these.  */
      if (target_float_switch[0] == 'h')
        arm_float_abi = ARM_FLOAT_ABI_HARD;
      else
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;
  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;
static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
          (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                         >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
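
/* Worked examples (editorial note, not in the original source): a valid
   ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, which is what the two-bits-at-a-time widening of MASK
   above probes:

     const_ok_for_arm (0x000000FF)   -> TRUE   (0xFF, no rotation)
     const_ok_for_arm (0x0000FF00)   -> TRUE   (0xFF rotated into bits 8-15)
     const_ok_for_arm (0xF000000F)   -> TRUE   (0xFF rotated right by 4)
     const_ok_for_arm (0x000001FE)   -> FALSE  (0xFF shifted by an odd amount)  */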
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
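
/* Editorial sketch (not in the original source): a value such as
   0x0000FF01 fails const_ok_for_arm (), because its significant bits do
   not fit in one rotated 8-bit field, so a SET of it is typically
   synthesized as two instructions along the lines of

        mov     rD, #0x0000FF00
        orr     rD, rD, #0x00000001

   whereas any const_ok_for_arm () value is emitted as a single mov.  */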
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
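
/* Editorial note (not in the original source): the loop above scans the
   value two bits at a time and charges one instruction for each group of
   significant bits that fits in one 8-bit window, so a constant such as
   0x00FF00FF, whose set bits form two byte-sized groups, is costed at
   two instructions.  */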
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }
1735 /* If we can do it in one insn, get out quickly.  */
1736 if (const_ok_for_arm (val)
1737 || (can_negate_initial && const_ok_for_arm (-val))
1738 || (can_invert && const_ok_for_arm (~val)))
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 (source
1744 ? gen_rtx_fmt_ee (code, mode, source,
1745 GEN_INT (val))
1746 : GEN_INT (val))));
1747 return 1;
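/* A worked illustration (values assumed, not from the original source):
   const_ok_for_arm accepts exactly the values that are an 8-bit constant
   rotated right by an even amount, so e.g. 0x000000ff, 0x0000ff00 and
   0xff000000 all hit this one-insn exit, while 0x000001fe (eight set
   bits, but at an odd rotation) does not, and neither do its complement
   or negation, so it falls through to the strategies below.  */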
1750 /* Calculate a few attributes that may be useful for specific
1751 optimizations. */
1752 for (i = 31; i >= 0; i--)
1754 if ((remainder & (1 << i)) == 0)
1755 clear_sign_bit_copies++;
1756 else
1757 break;
1760 for (i = 31; i >= 0; i--)
1762 if ((remainder & (1 << i)) != 0)
1763 set_sign_bit_copies++;
1764 else
1765 break;
1768 for (i = 0; i <= 31; i++)
1770 if ((remainder & (1 << i)) == 0)
1771 clear_zero_bit_copies++;
1772 else
1773 break;
1776 for (i = 0; i <= 31; i++)
1778 if ((remainder & (1 << i)) != 0)
1779 set_zero_bit_copies++;
1780 else
1781 break;
1784 switch (code)
1786 case SET:
1787 /* See if we can do this by sign_extending a constant that is known
1788 to be negative. This is a good way of doing it, since the shift
1789 may well merge into a subsequent insn. */
1790 if (set_sign_bit_copies > 1)
1792 if (const_ok_for_arm
1793 (temp1 = ARM_SIGN_EXTEND (remainder
1794 << (set_sign_bit_copies - 1))))
1796 if (generate)
1798 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1799 emit_constant_insn (cond,
1800 gen_rtx_SET (VOIDmode, new_src,
1801 GEN_INT (temp1)));
1802 emit_constant_insn (cond,
1803 gen_ashrsi3 (target, new_src,
1804 GEN_INT (set_sign_bit_copies - 1)));
1806 return 2;
1808 /* For an inverted constant, we will need to set the low bits;
1809 these will be shifted out of harm's way. */
1810 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1811 if (const_ok_for_arm (~temp1))
1813 if (generate)
1815 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1816 emit_constant_insn (cond,
1817 gen_rtx_SET (VOIDmode, new_src,
1818 GEN_INT (temp1)));
1819 emit_constant_insn (cond,
1820 gen_ashrsi3 (target, new_src,
1821 GEN_INT (set_sign_bit_copies - 1)));
1823 return 2;
1827 /* See if we can generate this by setting the bottom (or the top)
1828 16 bits, and then shifting these into the other half of the
1829 word. We only look for the simplest cases; to do more would cost
1830 too much. Be careful, however, not to generate this when the
1831 alternative would take fewer insns. */
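/* Illustrative case (constants assumed for the example): remainder ==
   0x01010101 would need four insns by plain 8-bit chunking, but
   temp2 == 0x0101 satisfies (temp2 | (temp2 << 16)) == remainder, so
   the first loop below finds i == 16 and the whole constant costs
   three insns (register names are illustrative):
       mov  rT, #0x100
       add  rT, rT, #1              @ rT = temp2 = 0x0101
       orr  rD, rT, rT, lsl #16     @ rD = 0x01010101  */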
1832 if (val & 0xffff0000)
1834 temp1 = remainder & 0xffff0000;
1835 temp2 = remainder & 0x0000ffff;
1837 /* Overlaps outside this range are best done using other methods. */
1838 for (i = 9; i < 24; i++)
1840 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1841 && !const_ok_for_arm (temp2))
1843 rtx new_src = (subtargets
1844 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1845 : target);
1846 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1847 source, subtargets, generate);
1848 source = new_src;
1849 if (generate)
1850 emit_constant_insn
1851 (cond,
1852 gen_rtx_SET
1853 (VOIDmode, target,
1854 gen_rtx_IOR (mode,
1855 gen_rtx_ASHIFT (mode, source,
1856 GEN_INT (i)),
1857 source)));
1858 return insns + 1;
1862 /* Don't duplicate cases already considered. */
1863 for (i = 17; i < 24; i++)
1865 if (((temp1 | (temp1 >> i)) == remainder)
1866 && !const_ok_for_arm (temp1))
1868 rtx new_src = (subtargets
1869 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1870 : target);
1871 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1872 source, subtargets, generate);
1873 source = new_src;
1874 if (generate)
1875 emit_constant_insn
1876 (cond,
1877 gen_rtx_SET (VOIDmode, target,
1878 gen_rtx_IOR
1879 (mode,
1880 gen_rtx_LSHIFTRT (mode, source,
1881 GEN_INT (i)),
1882 source)));
1883 return insns + 1;
1887 break;
1889 case IOR:
1890 case XOR:
1891 /* If we have IOR or XOR, and the constant can be loaded in a
1892 single instruction, and we can find a temporary to put it in,
1893 then this can be done in two instructions instead of 3-4. */
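/* For example (an illustrative sketch, register names made up): for
   (IOR rS 0xffffff00) the constant is not a valid immediate, but its
   complement 0xff is, so the path below loads it with a single MVN and
   finishes with the IOR itself:
       mvn  rT, #0xff               @ rT = 0xffffff00
       orr  rD, rS, rT  */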
1894 if (subtargets
1895 /* TARGET can't be NULL if SUBTARGETS is 0 */
1896 || (reload_completed && !reg_mentioned_p (target, source)))
1898 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1900 if (generate)
1902 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1904 emit_constant_insn (cond,
1905 gen_rtx_SET (VOIDmode, sub,
1906 GEN_INT (val)));
1907 emit_constant_insn (cond,
1908 gen_rtx_SET (VOIDmode, target,
1909 gen_rtx_fmt_ee (code, mode,
1910 source, sub)));
1912 return 2;
1916 if (code == XOR)
1917 break;
1919 if (set_sign_bit_copies > 8
1920 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1922 if (generate)
1924 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1925 rtx shift = GEN_INT (set_sign_bit_copies);
1927 emit_constant_insn
1928 (cond,
1929 gen_rtx_SET (VOIDmode, sub,
1930 gen_rtx_NOT (mode,
1931 gen_rtx_ASHIFT (mode,
1932 source,
1933 shift))));
1934 emit_constant_insn
1935 (cond,
1936 gen_rtx_SET (VOIDmode, target,
1937 gen_rtx_NOT (mode,
1938 gen_rtx_LSHIFTRT (mode, sub,
1939 shift))));
1941 return 2;
1944 if (set_zero_bit_copies > 8
1945 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1947 if (generate)
1949 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1950 rtx shift = GEN_INT (set_zero_bit_copies);
1952 emit_constant_insn
1953 (cond,
1954 gen_rtx_SET (VOIDmode, sub,
1955 gen_rtx_NOT (mode,
1956 gen_rtx_LSHIFTRT (mode,
1957 source,
1958 shift))));
1959 emit_constant_insn
1960 (cond,
1961 gen_rtx_SET (VOIDmode, target,
1962 gen_rtx_NOT (mode,
1963 gen_rtx_ASHIFT (mode, sub,
1964 shift))));
1966 return 2;
1969 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1971 if (generate)
1973 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, sub,
1976 gen_rtx_NOT (mode, source)));
1977 source = sub;
1978 if (subtargets)
1979 sub = gen_reg_rtx (mode);
1980 emit_constant_insn (cond,
1981 gen_rtx_SET (VOIDmode, sub,
1982 gen_rtx_AND (mode, source,
1983 GEN_INT (temp1))));
1984 emit_constant_insn (cond,
1985 gen_rtx_SET (VOIDmode, target,
1986 gen_rtx_NOT (mode, sub)));
1988 return 3;
1990 break;
1992 case AND:
1993 /* See if two shifts will do two or more insns' worth of work.  */
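/* Illustrative case: (AND rS 0x0000ffff) has 16 clear sign-bit copies,
   so rather than two BICs (#0xff000000 and #0x00ff0000) the code below
   emits two shifts needing no constants at all (registers illustrative):
       mov  rT, rS, lsl #16
       mov  rD, rT, lsr #16  */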
1994 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1996 HOST_WIDE_INT shift_mask = ((0xffffffff
1997 << (32 - clear_sign_bit_copies))
1998 & 0xffffffff);
2000 if ((remainder | shift_mask) != 0xffffffff)
2002 if (generate)
2004 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2005 insns = arm_gen_constant (AND, mode, cond,
2006 remainder | shift_mask,
2007 new_src, source, subtargets, 1);
2008 source = new_src;
2010 else
2012 rtx targ = subtargets ? NULL_RTX : target;
2013 insns = arm_gen_constant (AND, mode, cond,
2014 remainder | shift_mask,
2015 targ, source, subtargets, 0);
2019 if (generate)
2021 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2022 rtx shift = GEN_INT (clear_sign_bit_copies);
2024 emit_insn (gen_ashlsi3 (new_src, source, shift));
2025 emit_insn (gen_lshrsi3 (target, new_src, shift));
2028 return insns + 2;
2031 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2033 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2035 if ((remainder | shift_mask) != 0xffffffff)
2037 if (generate)
2039 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2041 insns = arm_gen_constant (AND, mode, cond,
2042 remainder | shift_mask,
2043 new_src, source, subtargets, 1);
2044 source = new_src;
2046 else
2048 rtx targ = subtargets ? NULL_RTX : target;
2050 insns = arm_gen_constant (AND, mode, cond,
2051 remainder | shift_mask,
2052 targ, source, subtargets, 0);
2056 if (generate)
2058 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2059 rtx shift = GEN_INT (clear_zero_bit_copies);
2061 emit_insn (gen_lshrsi3 (new_src, source, shift));
2062 emit_insn (gen_ashlsi3 (target, new_src, shift));
2065 return insns + 2;
2068 break;
2070 default:
2071 break;
2074 for (i = 0; i < 32; i++)
2075 if (remainder & (1 << i))
2076 num_bits_set++;
2078 if (code == AND || (can_invert && num_bits_set > 16))
2079 remainder = (~remainder) & 0xffffffff;
2080 else if (code == PLUS && num_bits_set > 16)
2081 remainder = (-remainder) & 0xffffffff;
2082 else
2084 can_invert = 0;
2085 can_negate = 0;
2088 /* Now try and find a way of doing the job in either two or three
2089 instructions.
2090 We start by looking for the largest block of zeros that is aligned on
2091 a 2-bit boundary; we then fill up the temps, wrapping around to the
2092 top of the word when we drop off the bottom.
2093 In the worst case this code should produce no more than four insns. */
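/* Illustrative run (constant assumed for the example): for a SET of
   0x0000ffff none of the special cases above fire, so the chunking loop
   below emits one 8-bit field at a time, switching from SET to PLUS
   after the first chunk:
       mov  rD, #0xff00
       add  rD, rD, #0xff  */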
2095 int best_start = 0;
2096 int best_consecutive_zeros = 0;
2098 for (i = 0; i < 32; i += 2)
2100 int consecutive_zeros = 0;
2102 if (!(remainder & (3 << i)))
2104 while ((i < 32) && !(remainder & (3 << i)))
2106 consecutive_zeros += 2;
2107 i += 2;
2109 if (consecutive_zeros > best_consecutive_zeros)
2111 best_consecutive_zeros = consecutive_zeros;
2112 best_start = i - consecutive_zeros;
2114 i -= 2;
2118 /* So long as it won't require any more insns to do so, it's
2119 desirable to emit a small constant (in bits 0...9) in the last
2120 insn. This way there is more chance that it can be combined with
2121 a later addressing insn to form a pre-indexed load or store
2122 operation. Consider:
2124 *((volatile int *)0xe0000100) = 1;
2125 *((volatile int *)0xe0000110) = 2;
2127 We want this to wind up as:
2129 mov rA, #0xe0000000
2130 mov rB, #1
2131 str rB, [rA, #0x100]
2132 mov rB, #2
2133 str rB, [rA, #0x110]
2135 rather than having to synthesize both large constants from scratch.
2137 Therefore, we calculate how many insns would be required to emit
2138 the constant starting from `best_start', and also starting from
2139 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2140 yield a shorter sequence, we may as well use zero. */
2141 if (best_start != 0
2142 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2143 && (count_insns_for_constant (remainder, 0) <=
2144 count_insns_for_constant (remainder, best_start)))
2145 best_start = 0;
2147 /* Now start emitting the insns. */
2148 i = best_start;
2151 int end;
2153 if (i <= 0)
2154 i += 32;
2155 if (remainder & (3 << (i - 2)))
2157 end = i - 8;
2158 if (end < 0)
2159 end += 32;
2160 temp1 = remainder & ((0x0ff << end)
2161 | ((i < end) ? (0xff >> (32 - end)) : 0));
2162 remainder &= ~temp1;
2164 if (generate)
2166 rtx new_src, temp1_rtx;
2168 if (code == SET || code == MINUS)
2170 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2171 if (can_invert && code != MINUS)
2172 temp1 = ~temp1;
2174 else
2176 if (remainder && subtargets)
2177 new_src = gen_reg_rtx (mode);
2178 else
2179 new_src = target;
2180 if (can_invert)
2181 temp1 = ~temp1;
2182 else if (can_negate)
2183 temp1 = -temp1;
2186 temp1 = trunc_int_for_mode (temp1, mode);
2187 temp1_rtx = GEN_INT (temp1);
2189 if (code == SET)
2191 else if (code == MINUS)
2192 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2193 else
2194 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2196 emit_constant_insn (cond,
2197 gen_rtx_SET (VOIDmode, new_src,
2198 temp1_rtx));
2199 source = new_src;
2202 if (code == SET)
2204 can_invert = 0;
2205 code = PLUS;
2207 else if (code == MINUS)
2208 code = PLUS;
2210 insns++;
2211 i -= 6;
2213 i -= 2;
2215 while (remainder);
2218 return insns;
2221 /* Canonicalize a comparison so that we are more likely to recognize it.
2222 This can be done for a few constant compares, where we can make the
2223 immediate value easier to load. */
2225 enum rtx_code
2226 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2228 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2230 switch (code)
2232 case EQ:
2233 case NE:
2234 return code;
2236 case GT:
2237 case LE:
2238 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2239 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2241 *op1 = GEN_INT (i + 1);
2242 return code == GT ? GE : LT;
2244 break;
2246 case GE:
2247 case LT:
2248 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2249 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2251 *op1 = GEN_INT (i - 1);
2252 return code == GE ? GT : LE;
2254 break;
2256 case GTU:
2257 case LEU:
2258 if (i != ~((unsigned HOST_WIDE_INT) 0)
2259 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2261 *op1 = GEN_INT (i + 1);
2262 return code == GTU ? GEU : LTU;
2264 break;
2266 case GEU:
2267 case LTU:
2268 if (i != 0
2269 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2271 *op1 = GEN_INT (i - 1);
2272 return code == GEU ? GTU : LEU;
2274 break;
2276 default:
2277 abort ();
2280 return code;
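/* Illustrative case for the above (values assumed): (GT x 4095) would
   need the immediate 0xfff, which is not a valid rotated 8-bit
   constant, so it is canonicalized to (GE x 4096); 4096 == 0x1000 is
   valid, giving a single `cmp rX, #4096'.  */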
2284 /* Define how to find the value returned by a function. */
2287 rtx arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2289 enum machine_mode mode;
2290 int unsignedp ATTRIBUTE_UNUSED;
2291 rtx r ATTRIBUTE_UNUSED;
2294 mode = TYPE_MODE (type);
2295 /* Promote integer types. */
2296 if (INTEGRAL_TYPE_P (type))
2297 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2298 return LIBCALL_VALUE(mode);
2301 /* Determine the amount of memory needed to store the possible return
2302 registers of an untyped call. */
2304 int arm_apply_result_size (void)
2306 int size = 16;
2308 if (TARGET_ARM)
2310 if (TARGET_HARD_FLOAT_ABI)
2312 if (TARGET_FPA)
2313 size += 12;
2314 if (TARGET_MAVERICK)
2315 size += 8;
2317 if (TARGET_IWMMXT_ABI)
2318 size += 8;
2321 return size;
2324 /* Decide whether a type should be returned in memory (true)
2325 or in a register (false). This is called by the macro
2326 RETURN_IN_MEMORY. */
2328 int arm_return_in_memory (tree type)
2330 HOST_WIDE_INT size;
2332 if (!AGGREGATE_TYPE_P (type) &&
2333 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2334 /* All simple types are returned in registers.
2335 For AAPCS, complex types are treated the same as aggregates. */
2336 return 0;
2338 size = int_size_in_bytes (type);
2340 if (arm_abi != ARM_ABI_APCS)
2342 /* ATPCS and later return aggregate types in memory only if they are
2343 larger than a word (or are variable size). */
2344 return (size < 0 || size > UNITS_PER_WORD);
2347 /* For the arm-wince targets we choose to be compatible with Microsoft's
2348 ARM and Thumb compilers, which always return aggregates in memory. */
2349 #ifndef ARM_WINCE
2350 /* All structures/unions bigger than one word are returned in memory.
2351 Also catch the case where int_size_in_bytes returns -1. In this case
2352 the aggregate is either huge or of variable size, and in either case
2353 we will want to return it via memory and not in a register. */
2354 if (size < 0 || size > UNITS_PER_WORD)
2355 return 1;
2357 if (TREE_CODE (type) == RECORD_TYPE)
2359 tree field;
2361 /* For a struct the APCS says that we only return in a register
2362 if the type is 'integer like' and every addressable element
2363 has an offset of zero. For practical purposes this means
2364 that the structure can have at most one non bit-field element
2365 and that this element must be the first one in the structure. */
2367 /* Find the first field, ignoring non FIELD_DECL things which will
2368 have been created by C++. */
2369 for (field = TYPE_FIELDS (type);
2370 field && TREE_CODE (field) != FIELD_DECL;
2371 field = TREE_CHAIN (field))
2372 continue;
2374 if (field == NULL)
2375 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2377 /* Check that the first field is valid for returning in a register. */
2379 /* ... Floats are not allowed */
2380 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2381 return 1;
2383 /* ... Aggregates that are not themselves valid for returning in
2384 a register are not allowed. */
2385 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2386 return 1;
2388 /* Now check the remaining fields, if any. Only bitfields are allowed,
2389 since they are not addressable. */
2390 for (field = TREE_CHAIN (field);
2391 field;
2392 field = TREE_CHAIN (field))
2394 if (TREE_CODE (field) != FIELD_DECL)
2395 continue;
2397 if (!DECL_BIT_FIELD_TYPE (field))
2398 return 1;
2401 return 0;
2404 if (TREE_CODE (type) == UNION_TYPE)
2406 tree field;
2408 /* Unions can be returned in registers if every element is
2409 integral, or can be returned in an integer register. */
2410 for (field = TYPE_FIELDS (type);
2411 field;
2412 field = TREE_CHAIN (field))
2414 if (TREE_CODE (field) != FIELD_DECL)
2415 continue;
2417 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2418 return 1;
2420 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2421 return 1;
2424 return 0;
2426 #endif /* not ARM_WINCE */
2428 /* Return all other types in memory. */
2429 return 1;
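/* Illustrative consequences of the APCS rules above (assuming a 32-bit
   UNITS_PER_WORD): `struct { int i; }' is integer-like and is returned
   in a register; `struct { float f; }' goes via memory because its
   first field is a float; and `struct { int a, b; }' goes via memory
   simply because it is larger than a word.  */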
2432 /* Indicate whether or not words of a double are in big-endian order. */
2435 int arm_float_words_big_endian (void)
2437 if (TARGET_MAVERICK)
2438 return 0;
2440 /* For FPA, float words are always big-endian. For VFP, float words
2441 follow the memory system mode. */
2443 if (TARGET_FPA)
2445 return 1;
2448 if (TARGET_VFP)
2449 return (TARGET_BIG_END ? 1 : 0);
2451 return 1;
2454 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2455 for a call to a function whose data type is FNTYPE.
2456 For a library call, FNTYPE is NULL. */
2457 void
2458 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2459 rtx libname ATTRIBUTE_UNUSED,
2460 tree fndecl ATTRIBUTE_UNUSED)
2462 /* On the ARM, the offset starts at 0. */
2463 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2464 pcum->iwmmxt_nregs = 0;
2465 pcum->can_split = true;
2467 pcum->call_cookie = CALL_NORMAL;
2469 if (TARGET_LONG_CALLS)
2470 pcum->call_cookie = CALL_LONG;
2472 /* Check for long call/short call attributes. The attributes
2473 override any command line option. */
2474 if (fntype)
2476 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2477 pcum->call_cookie = CALL_SHORT;
2478 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2479 pcum->call_cookie = CALL_LONG;
2482 /* Varargs vectors are treated the same as long long.
2483 named_count avoids having to change the way arm handles 'named'.  */
2484 pcum->named_count = 0;
2485 pcum->nargs = 0;
2487 if (TARGET_REALLY_IWMMXT && fntype)
2489 tree fn_arg;
2491 for (fn_arg = TYPE_ARG_TYPES (fntype);
2492 fn_arg;
2493 fn_arg = TREE_CHAIN (fn_arg))
2494 pcum->named_count += 1;
2496 if (! pcum->named_count)
2497 pcum->named_count = INT_MAX;
2502 /* Return true if mode/type need doubleword alignment. */
2503 bool
2504 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2506 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2507 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2511 /* Determine where to put an argument to a function.
2512 Value is zero to push the argument on the stack,
2513 or a hard register in which to store the argument.
2515 MODE is the argument's machine mode.
2516 TYPE is the data type of the argument (as a tree).
2517 This is null for libcalls where that information may
2518 not be available.
2519 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2520 the preceding args and about the function being called.
2521 NAMED is nonzero if this argument is a named parameter
2522 (otherwise it is an extra parameter matching an ellipsis). */
2525 rtx arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2526 tree type, int named)
2528 int nregs;
2530 /* Varargs vectors are treated the same as long long.
2531 named_count avoids having to change the way arm handles 'named'.  */
2532 if (TARGET_IWMMXT_ABI
2533 && arm_vector_mode_supported_p (mode)
2534 && pcum->named_count > pcum->nargs + 1)
2536 if (pcum->iwmmxt_nregs <= 9)
2537 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2538 else
2540 pcum->can_split = false;
2541 return NULL_RTX;
2545 /* Put doubleword aligned quantities in even register pairs. */
2546 if (pcum->nregs & 1
2547 && ARM_DOUBLEWORD_ALIGN
2548 && arm_needs_doubleword_align (mode, type))
2549 pcum->nregs++;
2551 if (mode == VOIDmode)
2552 /* Compute operand 2 of the call insn. */
2553 return GEN_INT (pcum->call_cookie);
2555 /* Only allow splitting an arg between regs and memory if all preceding
2556 args were allocated to regs. For args passed by reference we only count
2557 the reference pointer. */
2558 if (pcum->can_split)
2559 nregs = 1;
2560 else
2561 nregs = ARM_NUM_REGS2 (mode, type);
2563 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2564 return NULL_RTX;
2566 return gen_rtx_REG (mode, pcum->nregs);
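/* A worked example of the even-pair rule above (assuming an ABI where
   ARM_DOUBLEWORD_ALIGN holds, e.g. the AAPCS): for f (int a, long long b),
   `a' lands in r0; `b' needs doubleword alignment, so nregs is bumped
   from 1 to 2 and `b' occupies r2/r3, leaving r1 unused rather than
   splitting `b' across r1/r2.  */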
2569 static int
2570 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2571 tree type, bool named ATTRIBUTE_UNUSED)
2573 int nregs = pcum->nregs;
2575 if (arm_vector_mode_supported_p (mode))
2576 return 0;
2578 if (NUM_ARG_REGS > nregs
2579 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2580 && pcum->can_split)
2581 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2583 return 0;
2586 /* Variable sized types are passed by reference. This is a GCC
2587 extension to the ARM ABI. */
2589 static bool
2590 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2591 enum machine_mode mode ATTRIBUTE_UNUSED,
2592 tree type, bool named ATTRIBUTE_UNUSED)
2594 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2597 /* Encode the current state of the #pragma [no_]long_calls. */
2598 typedef enum
2600 OFF, /* No #pragma [no_]long_calls is in effect.  */
2601 LONG, /* #pragma long_calls is in effect. */
2602 SHORT /* #pragma no_long_calls is in effect. */
2603 } arm_pragma_enum;
2605 static arm_pragma_enum arm_pragma_long_calls = OFF;
2607 void
2608 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2610 arm_pragma_long_calls = LONG;
2613 void
2614 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2616 arm_pragma_long_calls = SHORT;
2619 void
2620 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2622 arm_pragma_long_calls = OFF;
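/* Illustrative usage of the pragmas handled above (the declaration
   names are made up):

       #pragma long_calls
       void far_func (void);       -- type gets "long_call"
       #pragma no_long_calls
       void near_func (void);      -- type gets "short_call"
       #pragma long_calls_off

   The attributes themselves are attached in
   arm_set_default_type_attributes, below.  */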
2625 /* Table of machine attributes. */
2626 const struct attribute_spec arm_attribute_table[] =
2628 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2629 /* Function calls made to this symbol must be done indirectly, because
2630 it may lie outside of the 26 bit addressing range of a normal function
2631 call. */
2632 { "long_call", 0, 0, false, true, true, NULL },
2633 /* Whereas these functions are always known to reside within the 26 bit
2634 addressing range. */
2635 { "short_call", 0, 0, false, true, true, NULL },
2636 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2637 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2638 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2639 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2640 #ifdef ARM_PE
2641 /* ARM/PE has three new attributes:
2642 interfacearm - ?
2643 dllexport - for exporting a function/variable that will live in a dll
2644 dllimport - for importing a function/variable from a dll
2646 Microsoft allows multiple declspecs in one __declspec, separating
2647 them with spaces. We do NOT support this. Instead, use __declspec
2648 multiple times.  */
2650 { "dllimport", 0, 0, true, false, false, NULL },
2651 { "dllexport", 0, 0, true, false, false, NULL },
2652 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2653 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2654 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2655 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2656 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2657 #endif
2658 { NULL, 0, 0, false, false, false, NULL }
2661 /* Handle an attribute requiring a FUNCTION_DECL;
2662 arguments as in struct attribute_spec.handler. */
2663 static tree
2664 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2665 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2667 if (TREE_CODE (*node) != FUNCTION_DECL)
2669 warning ("%qs attribute only applies to functions",
2670 IDENTIFIER_POINTER (name));
2671 *no_add_attrs = true;
2674 return NULL_TREE;
2677 /* Handle an "interrupt" or "isr" attribute;
2678 arguments as in struct attribute_spec.handler. */
2679 static tree
2680 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2681 bool *no_add_attrs)
2683 if (DECL_P (*node))
2685 if (TREE_CODE (*node) != FUNCTION_DECL)
2687 warning ("%qs attribute only applies to functions",
2688 IDENTIFIER_POINTER (name));
2689 *no_add_attrs = true;
2691 /* FIXME: the argument if any is checked for type attributes;
2692 should it be checked for decl ones? */
2694 else
2696 if (TREE_CODE (*node) == FUNCTION_TYPE
2697 || TREE_CODE (*node) == METHOD_TYPE)
2699 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2701 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2702 *no_add_attrs = true;
2705 else if (TREE_CODE (*node) == POINTER_TYPE
2706 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2707 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2708 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2710 *node = build_variant_type_copy (*node);
2711 TREE_TYPE (*node) = build_type_attribute_variant
2712 (TREE_TYPE (*node),
2713 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2714 *no_add_attrs = true;
2716 else
2718 /* Possibly pass this attribute on from the type to a decl. */
2719 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2720 | (int) ATTR_FLAG_FUNCTION_NEXT
2721 | (int) ATTR_FLAG_ARRAY_NEXT))
2723 *no_add_attrs = true;
2724 return tree_cons (name, args, NULL_TREE);
2726 else
2728 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2733 return NULL_TREE;
2736 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2737 /* Handle the "notshared" attribute. This attribute is another way of
2738 requesting hidden visibility. ARM's compiler supports
2739 "__declspec(notshared)"; we support the same thing via an
2740 attribute. */
2742 static tree
2743 arm_handle_notshared_attribute (tree *node,
2744 tree name ATTRIBUTE_UNUSED,
2745 tree args ATTRIBUTE_UNUSED,
2746 int flags ATTRIBUTE_UNUSED,
2747 bool *no_add_attrs)
2749 tree decl = TYPE_NAME (*node);
2751 if (decl)
2753 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2754 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2755 *no_add_attrs = false;
2757 return NULL_TREE;
2759 #endif
2761 /* Return 0 if the attributes for two types are incompatible, 1 if they
2762 are compatible, and 2 if they are nearly compatible (which causes a
2763 warning to be generated). */
2764 static int
2765 arm_comp_type_attributes (tree type1, tree type2)
2767 int l1, l2, s1, s2;
2769 /* Check for mismatch of non-default calling convention. */
2770 if (TREE_CODE (type1) != FUNCTION_TYPE)
2771 return 1;
2773 /* Check for mismatched call attributes. */
2774 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2775 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2776 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2777 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2779 /* Only bother to check if an attribute is defined. */
2780 if (l1 | l2 | s1 | s2)
2782 /* If one type has an attribute, the other must have the same attribute. */
2783 if ((l1 != l2) || (s1 != s2))
2784 return 0;
2786 /* Disallow mixed attributes. */
2787 if ((l1 & s2) || (l2 & s1))
2788 return 0;
2791 /* Check for mismatched ISR attribute. */
2792 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2793 if (! l1)
2794 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2795 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2796 if (! l2)
2797 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2798 if (l1 != l2)
2799 return 0;
2801 return 1;
2804 /* Encode long_call or short_call attribute by prefixing
2805 symbol name in DECL with a special character FLAG. */
2806 void
2807 arm_encode_call_attribute (tree decl, int flag)
2809 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2810 int len = strlen (str);
2811 char * newstr;
2813 /* Do not allow weak functions to be treated as short call. */
2814 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2815 return;
2817 newstr = alloca (len + 2);
2818 newstr[0] = flag;
2819 strcpy (newstr + 1, str);
2821 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2822 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2825 /* Assigns default attributes to a newly defined type. This is used to
2826 set short_call/long_call attributes for function types of
2827 functions defined inside corresponding #pragma scopes. */
2828 static void
2829 arm_set_default_type_attributes (tree type)
2831 /* Add __attribute__ ((long_call)) to all functions when inside
2832 #pragma long_calls, or __attribute__ ((short_call)) when inside
2833 #pragma no_long_calls. */
2834 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2836 tree type_attr_list, attr_name;
2837 type_attr_list = TYPE_ATTRIBUTES (type);
2839 if (arm_pragma_long_calls == LONG)
2840 attr_name = get_identifier ("long_call");
2841 else if (arm_pragma_long_calls == SHORT)
2842 attr_name = get_identifier ("short_call");
2843 else
2844 return;
2846 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2847 TYPE_ATTRIBUTES (type) = type_attr_list;
2851 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2852 defined within the current compilation unit. If this cannot be
2853 determined, then 0 is returned. */
2854 static int
2855 current_file_function_operand (rtx sym_ref)
2857 /* This is a bit of a fib. A function will have a short call flag
2858 applied to its name if it has the short call attribute, or it has
2859 already been defined within the current compilation unit. */
2860 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2861 return 1;
2863 /* The current function is always defined within the current compilation
2864 unit. If it is a weak definition, however, then this may not be the real
2865 definition of the function, and so we have to say no. */
2866 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2867 && !DECL_WEAK (current_function_decl))
2868 return 1;
2870 /* We cannot make the determination - default to returning 0. */
2871 return 0;
2874 /* Return nonzero if a 32 bit "long_call" should be generated for
2875 this call. We generate a long_call if the function:
2877 a. has an __attribute__ ((long_call))
2878 or b. is within the scope of a #pragma long_calls
2879 or c. the -mlong-calls command line switch has been specified
2880 and either:
2881 1. -ffunction-sections is in effect
2882 or 2. the current function has __attribute__ ((section))
2883 or 3. the target function has __attribute__ ((section))
2885 However we do not generate a long call if the function:
2887 d. has an __attribute__ ((short_call))
2888 or e. is inside the scope of a #pragma no_long_calls
2889 or f. is defined within the current compilation unit.
2891 This function will be called by C fragments contained in the machine
2892 description file. SYM_REF and CALL_COOKIE correspond to the matched
2893 rtl operands. CALL_SYMBOL is used to distinguish between
2894 two different callers of the function. It is set to 1 in the
2895 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2896 and "call_value" patterns. This is because of the difference in the
2897 SYM_REFs passed by these patterns. */
2899 int arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2901 if (!call_symbol)
2903 if (GET_CODE (sym_ref) != MEM)
2904 return 0;
2906 sym_ref = XEXP (sym_ref, 0);
2909 if (GET_CODE (sym_ref) != SYMBOL_REF)
2910 return 0;
2912 if (call_cookie & CALL_SHORT)
2913 return 0;
2915 if (TARGET_LONG_CALLS)
2917 if (flag_function_sections
2918 || DECL_SECTION_NAME (current_function_decl))
2919 /* c.3 is handled by the definition of the
2920 ARM_DECLARE_FUNCTION_SIZE macro. */
2921 return 1;
2924 if (current_file_function_operand (sym_ref))
2925 return 0;
2927 return (call_cookie & CALL_LONG)
2928 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2929 || TARGET_LONG_CALLS;
2932 /* Return nonzero if it is ok to make a tail-call to DECL. */
2933 static bool
2934 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2936 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2938 if (cfun->machine->sibcall_blocked)
2939 return false;
2941 /* Never tailcall something for which we have no decl, or if we
2942 are in Thumb mode. */
2943 if (decl == NULL || TARGET_THUMB)
2944 return false;
2946 /* Get the calling method. */
2947 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2948 call_type = CALL_SHORT;
2949 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2950 call_type = CALL_LONG;
2952 /* Cannot tail-call to long calls, since these are out of range of
2953 a branch instruction. However, if not compiling PIC, we know
2954 we can reach the symbol if it is in this compilation unit. */
2955 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2956 return false;
2958 /* If we are interworking and the function is not declared static
2959 then we can't tail-call it unless we know that it exists in this
2960 compilation unit (since it might be a Thumb routine). */
2961 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2962 return false;
2964 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2965 if (IS_INTERRUPT (arm_current_func_type ()))
2966 return false;
2968 /* Everything else is ok. */
2969 return true;
2973 /* Addressing mode support functions. */
2975 /* Return nonzero if X is a legitimate immediate operand when compiling
2976 for PIC. */
2978 int legitimate_pic_operand_p (rtx x)
2980 if (CONSTANT_P (x)
2981 && flag_pic
2982 && (GET_CODE (x) == SYMBOL_REF
2983 || (GET_CODE (x) == CONST
2984 && GET_CODE (XEXP (x, 0)) == PLUS
2985 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2986 return 0;
2988 return 1;
2992 rtx legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2994 if (GET_CODE (orig) == SYMBOL_REF
2995 || GET_CODE (orig) == LABEL_REF)
2997 #ifndef AOF_ASSEMBLER
2998 rtx pic_ref, address;
2999 #endif
3000 rtx insn;
3001 int subregs = 0;
3003 if (reg == 0)
3005 if (no_new_pseudos)
3006 abort ();
3007 else
3008 reg = gen_reg_rtx (Pmode);
3010 subregs = 1;
3013 #ifdef AOF_ASSEMBLER
3014 /* The AOF assembler can generate relocations for these directly, and
3015 understands that the PIC register has to be added into the offset. */
3016 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3017 #else
3018 if (subregs)
3019 address = gen_reg_rtx (Pmode);
3020 else
3021 address = reg;
3023 if (TARGET_ARM)
3024 emit_insn (gen_pic_load_addr_arm (address, orig));
3025 else
3026 emit_insn (gen_pic_load_addr_thumb (address, orig));
3028 if ((GET_CODE (orig) == LABEL_REF
3029 || (GET_CODE (orig) == SYMBOL_REF &&
3030 SYMBOL_REF_LOCAL_P (orig)))
3031 && NEED_GOT_RELOC)
3032 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3033 else
3035 pic_ref = gen_const_mem (Pmode,
3036 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3037 address));
3040 insn = emit_move_insn (reg, pic_ref);
3041 #endif
3042 current_function_uses_pic_offset_table = 1;
3043 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3044 by the loop optimizer.  */
3045 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3046 REG_NOTES (insn));
3047 return reg;
3049 else if (GET_CODE (orig) == CONST)
3051 rtx base, offset;
3053 if (GET_CODE (XEXP (orig, 0)) == PLUS
3054 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3055 return orig;
3057 if (reg == 0)
3059 if (no_new_pseudos)
3060 abort ();
3061 else
3062 reg = gen_reg_rtx (Pmode);
3065 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3067 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3068 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3069 base == reg ? 0 : reg);
3071 else
3072 abort ();
3074 if (GET_CODE (offset) == CONST_INT)
3076 /* The base register doesn't really matter; we only want to
3077 test the index for the appropriate mode. */
3078 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3080 if (!no_new_pseudos)
3081 offset = force_reg (Pmode, offset);
3082 else
3083 abort ();
3086 if (GET_CODE (offset) == CONST_INT)
3087 return plus_constant (base, INTVAL (offset));
3090 if (GET_MODE_SIZE (mode) > 4
3091 && (GET_MODE_CLASS (mode) == MODE_INT
3092 || TARGET_SOFT_FLOAT))
3094 emit_insn (gen_addsi3 (reg, base, offset));
3095 return reg;
3098 return gen_rtx_PLUS (Pmode, base, offset);
3101 return orig;
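/* Illustrative flow for the SYMBOL_REF case above, under -fpic with an
   ELF-style GOT (label and register names are made up): for a global
   `x', pic_load_addr loads the GOT offset of `x' and the generated
   memory reference then indexes the PIC register:
       ldr  rT, .Lgot_x             @ offset of x's GOT slot
       ldr  rX, [rPIC, rT]          @ rX = &x
   Local symbols and labels take the NEED_GOT_RELOC branch instead and
   simply add the PIC register, with no load through the GOT.  */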
3105 /* Find a spare low register. */
3107 static int
3108 thumb_find_work_register (int live_regs_mask)
3110 int reg;
3112 /* Use a spare arg register. */
3113 if (!regs_ever_live[LAST_ARG_REGNUM])
3114 return LAST_ARG_REGNUM;
3116 /* Look for a pushed register. This is used before the frame pointer is
3117 set up, so r7 is a candidate.  */
3118 for (reg = LAST_LO_REGNUM; reg >= 0; reg--)
3119 if (live_regs_mask & (1 << reg))
3120 return reg;
3122 /* Something went wrong. */
3123 abort ();
3127 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3128 low register. */
3130 void
3131 arm_load_pic_register (unsigned int scratch)
3133 #ifndef AOF_ASSEMBLER
3134 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3135 rtx global_offset_table;
3137 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3138 return;
3140 if (!flag_pic)
3141 abort ();
3143 l1 = gen_label_rtx ();
3145 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3146 /* On the ARM the PC register contains 'dot + 8' at the time of the
3147 addition; on the Thumb it is 'dot + 4'.  */
3148 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3149 if (GOT_PCREL)
3150 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3151 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3152 else
3153 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3155 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3157 if (TARGET_ARM)
3159 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3160 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3162 else
3164 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3166 /* We will have pushed the pic register, so should always be
3167 able to find a work register. */
3168 pic_tmp = gen_rtx_REG (SImode, scratch);
3169 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3170 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3172 else
3173 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3174 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3177 /* Need to emit this whether or not we obey regdecls,
3178 since setjmp/longjmp can cause life info to screw up. */
3179 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3180 #endif /* AOF_ASSEMBLER */
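/* A sketch of the ARM-state sequence emitted above (label names are
   illustrative):

       ldr   rPIC, .LCpic           @ _GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)
   .LPIC0:
       add   rPIC, pc, rPIC         @ pc reads as .LPIC0 + 8 here

   which is why `dot + 8' (`dot + 4' on Thumb) is folded into the
   constant beforehand.  */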
3184 /* Return nonzero if X is valid as an ARM state addressing register. */
3185 static int
3186 arm_address_register_rtx_p (rtx x, int strict_p)
3188 int regno;
3190 if (GET_CODE (x) != REG)
3191 return 0;
3193 regno = REGNO (x);
3195 if (strict_p)
3196 return ARM_REGNO_OK_FOR_BASE_P (regno);
3198 return (regno <= LAST_ARM_REGNUM
3199 || regno >= FIRST_PSEUDO_REGISTER
3200 || regno == FRAME_POINTER_REGNUM
3201 || regno == ARG_POINTER_REGNUM);
3204 /* Return nonzero if X is a valid ARM state address operand. */
3206 int arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3207 int strict_p)
3209 bool use_ldrd;
3210 enum rtx_code code = GET_CODE (x);
3212 if (arm_address_register_rtx_p (x, strict_p))
3213 return 1;
3215 use_ldrd = (TARGET_LDRD
3216 && (mode == DImode
3217 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3219 if (code == POST_INC || code == PRE_DEC
3220 || ((code == PRE_INC || code == POST_DEC)
3221 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3222 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3224 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3225 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3226 && GET_CODE (XEXP (x, 1)) == PLUS
3227 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3229 rtx addend = XEXP (XEXP (x, 1), 1);
3231 /* Don't allow ldrd post increment by register because it's hard
3232 to fix up invalid register choices.  */
3233 if (use_ldrd
3234 && GET_CODE (x) == POST_MODIFY
3235 && GET_CODE (addend) == REG)
3236 return 0;
3238 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3239 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3242 /* After reload, constants split into minipools will have addresses
3243 derived from a LABEL_REF. */
3244 else if (reload_completed
3245 && (code == LABEL_REF
3246 || (code == CONST
3247 && GET_CODE (XEXP (x, 0)) == PLUS
3248 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3249 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3250 return 1;
3252 else if (mode == TImode)
3253 return 0;
3255 else if (code == PLUS)
3257 rtx xop0 = XEXP (x, 0);
3258 rtx xop1 = XEXP (x, 1);
3260 return ((arm_address_register_rtx_p (xop0, strict_p)
3261 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3262 || (arm_address_register_rtx_p (xop1, strict_p)
3263 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3266 #if 0
3267 /* Reload currently can't handle MINUS, so disable this for now */
3268 else if (GET_CODE (x) == MINUS)
3270 rtx xop0 = XEXP (x, 0);
3271 rtx xop1 = XEXP (x, 1);
3273 return (arm_address_register_rtx_p (xop0, strict_p)
3274 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3276 #endif
3278 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3279 && code == SYMBOL_REF
3280 && CONSTANT_POOL_ADDRESS_P (x)
3281 && ! (flag_pic
3282 && symbol_mentioned_p (get_pool_constant (x))))
3283 return 1;
3285 return 0;
3288 /* Return nonzero if INDEX is valid for an address index operand in
3289 ARM state. */
3290 static int
3291 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3292 int strict_p)
3294 HOST_WIDE_INT range;
3295 enum rtx_code code = GET_CODE (index);
3297 /* Standard coprocessor addressing modes. */
3298 if (TARGET_HARD_FLOAT
3299 && (TARGET_FPA || TARGET_MAVERICK)
3300 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3301 || (TARGET_MAVERICK && mode == DImode)))
3302 return (code == CONST_INT && INTVAL (index) < 1024
3303 && INTVAL (index) > -1024
3304 && (INTVAL (index) & 3) == 0);
3306 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3307 return (code == CONST_INT
3308 && INTVAL (index) < 1024
3309 && INTVAL (index) > -1024
3310 && (INTVAL (index) & 3) == 0);
3312 if (arm_address_register_rtx_p (index, strict_p)
3313 && (GET_MODE_SIZE (mode) <= 4))
3314 return 1;
3316 if (mode == DImode || mode == DFmode)
3318 if (code == CONST_INT)
3320 HOST_WIDE_INT val = INTVAL (index);
3322 if (TARGET_LDRD)
3323 return val > -256 && val < 256;
3324 else
3325 return val > -4096 && val < 4092;
3328 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3331 if (GET_MODE_SIZE (mode) <= 4
3332 && ! (arm_arch4
3333 && (mode == HImode
3334 || (mode == QImode && outer == SIGN_EXTEND))))
3336 if (code == MULT)
3338 rtx xiop0 = XEXP (index, 0);
3339 rtx xiop1 = XEXP (index, 1);
3341 return ((arm_address_register_rtx_p (xiop0, strict_p)
3342 && power_of_two_operand (xiop1, SImode))
3343 || (arm_address_register_rtx_p (xiop1, strict_p)
3344 && power_of_two_operand (xiop0, SImode)));
3346 else if (code == LSHIFTRT || code == ASHIFTRT
3347 || code == ASHIFT || code == ROTATERT)
3349 rtx op = XEXP (index, 1);
3351 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3352 && GET_CODE (op) == CONST_INT
3353 && INTVAL (op) > 0
3354 && INTVAL (op) <= 31);
3358 /* For ARM v4 we may be doing a sign-extend operation during the
3359 load. */
3360 if (arm_arch4)
3362 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3363 range = 256;
3364 else
3365 range = 4096;
3367 else
3368 range = (mode == HImode) ? 4095 : 4096;
3370 return (code == CONST_INT
3371 && INTVAL (index) < range
3372 && INTVAL (index) > -range);
3375 /* Return nonzero if X is valid as a Thumb state base register. */
3376 static int
3377 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3379 int regno;
3381 if (GET_CODE (x) != REG)
3382 return 0;
3384 regno = REGNO (x);
3386 if (strict_p)
3387 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3389 return (regno <= LAST_LO_REGNUM
3390 || regno > LAST_VIRTUAL_REGISTER
3391 || regno == FRAME_POINTER_REGNUM
3392 || (GET_MODE_SIZE (mode) >= 4
3393 && (regno == STACK_POINTER_REGNUM
3394 || regno >= FIRST_PSEUDO_REGISTER
3395 || x == hard_frame_pointer_rtx
3396 || x == arg_pointer_rtx)));
3399 /* Return nonzero if x is a legitimate index register. This is the case
3400 for any base register that can access a QImode object. */
3401 inline static int
3402 thumb_index_register_rtx_p (rtx x, int strict_p)
3404 return thumb_base_register_rtx_p (x, QImode, strict_p);
3407 /* Return nonzero if x is a legitimate Thumb-state address.
3409 The AP may be eliminated to either the SP or the FP, so we use the
3410 least common denominator, e.g. SImode, and offsets from 0 to 64.
3412 ??? Verify whether the above is the right approach.
3414 ??? Also, the FP may be eliminated to the SP, so perhaps that
3415 needs special handling also.
3417 ??? Look at how the mips16 port solves this problem. It probably uses
3418 better ways to solve some of these problems.
3420 Although it is not incorrect, we don't accept QImode and HImode
3421 addresses based on the frame pointer or arg pointer until the
3422 reload pass starts. This is so that eliminating such addresses
3423 into stack based ones won't produce impossible code. */
3425 int thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3427 /* ??? Not clear if this is right. Experiment. */
3428 if (GET_MODE_SIZE (mode) < 4
3429 && !(reload_in_progress || reload_completed)
3430 && (reg_mentioned_p (frame_pointer_rtx, x)
3431 || reg_mentioned_p (arg_pointer_rtx, x)
3432 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3433 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3434 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3435 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3436 return 0;
3438 /* Accept any base register. SP only in SImode or larger. */
3439 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3440 return 1;
3442 /* This is PC relative data before arm_reorg runs. */
3443 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3444 && GET_CODE (x) == SYMBOL_REF
3445 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3446 return 1;
3448 /* This is PC relative data after arm_reorg runs. */
3449 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3450 && (GET_CODE (x) == LABEL_REF
3451 || (GET_CODE (x) == CONST
3452 && GET_CODE (XEXP (x, 0)) == PLUS
3453 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3454 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3455 return 1;
3457 /* Post-inc indexing only supported for SImode and larger. */
3458 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3459 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3460 return 1;
3462 else if (GET_CODE (x) == PLUS)
3464 /* REG+REG address can be any two index registers. */
3465 /* We disallow FRAME+REG addressing since we know that FRAME
3466 will be replaced with STACK, and SP relative addressing only
3467 permits SP+OFFSET. */
3468 if (GET_MODE_SIZE (mode) <= 4
3469 && XEXP (x, 0) != frame_pointer_rtx
3470 && XEXP (x, 1) != frame_pointer_rtx
3471 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3472 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3473 return 1;
3475 /* REG+const has 5-7 bit offset for non-SP registers. */
3476 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3477 || XEXP (x, 0) == arg_pointer_rtx)
3478 && GET_CODE (XEXP (x, 1)) == CONST_INT
3479 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3480 return 1;
3482 /* REG+const has 10 bit offset for SP, but only SImode and
3483 larger are supported. */
3484 /* ??? Should probably check for DI/DFmode overflow here
3485 just like GO_IF_LEGITIMATE_OFFSET does. */
3486 else if (GET_CODE (XEXP (x, 0)) == REG
3487 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3488 && GET_MODE_SIZE (mode) >= 4
3489 && GET_CODE (XEXP (x, 1)) == CONST_INT
3490 && INTVAL (XEXP (x, 1)) >= 0
3491 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3492 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3493 return 1;
3495 else if (GET_CODE (XEXP (x, 0)) == REG
3496 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3497 && GET_MODE_SIZE (mode) >= 4
3498 && GET_CODE (XEXP (x, 1)) == CONST_INT
3499 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3500 return 1;
3503 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3504 && GET_MODE_SIZE (mode) == 4
3505 && GET_CODE (x) == SYMBOL_REF
3506 && CONSTANT_POOL_ADDRESS_P (x)
3507 && !(flag_pic
3508 && symbol_mentioned_p (get_pool_constant (x))))
3509 return 1;
3511 return 0;
3514 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3515 instruction of mode MODE. */
3517 int thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3519 switch (GET_MODE_SIZE (mode))
3521 case 1:
3522 return val >= 0 && val < 32;
3524 case 2:
3525 return val >= 0 && val < 64 && (val & 1) == 0;
3527 default:
3528 return (val >= 0
3529 && (val + GET_MODE_SIZE (mode)) <= 128
3530 && (val & 3) == 0);
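/* The resulting unscaled offset ranges, from the checks above:
     bytes      0..31
     halfwords  0..62, even
     words+     0 .. 128 - size, multiple of 4 (0..124 for SImode)
   i.e. the 5-bit scaled immediate field of the Thumb load/store
   instructions.  */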
3534 /* Try machine-dependent ways of modifying an illegitimate address
3535 to be legitimate. If we find one, return the new, valid address. */
3537 rtx arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3539 if (GET_CODE (x) == PLUS)
3541 rtx xop0 = XEXP (x, 0);
3542 rtx xop1 = XEXP (x, 1);
3544 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3545 xop0 = force_reg (SImode, xop0);
3547 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3548 xop1 = force_reg (SImode, xop1);
3550 if (ARM_BASE_REGISTER_RTX_P (xop0)
3551 && GET_CODE (xop1) == CONST_INT)
3553 HOST_WIDE_INT n, low_n;
3554 rtx base_reg, val;
3555 n = INTVAL (xop1);
3557 /* VFP addressing modes actually allow greater offsets, but for
3558 now we just stick with the lowest common denominator. */
3559 if (mode == DImode
3560 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3562 low_n = n & 0x0f;
3563 n &= ~0x0f;
3564 if (low_n > 4)
3566 n += 16;
3567 low_n -= 16;
3570 else
3572 low_n = ((mode) == TImode ? 0
3573 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3574 n -= low_n;
3577 base_reg = gen_reg_rtx (SImode);
3578 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3579 GEN_INT (n)), NULL_RTX);
3580 emit_move_insn (base_reg, val);
3581 x = (low_n == 0 ? base_reg
3582 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3584 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3585 x = gen_rtx_PLUS (SImode, xop0, xop1);
3588 /* XXX We don't allow MINUS any more -- see comment in
3589 arm_legitimate_address_p (). */
3590 else if (GET_CODE (x) == MINUS)
3592 rtx xop0 = XEXP (x, 0);
3593 rtx xop1 = XEXP (x, 1);
3595 if (CONSTANT_P (xop0))
3596 xop0 = force_reg (SImode, xop0);
3598 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3599 xop1 = force_reg (SImode, xop1);
3601 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3602 x = gen_rtx_MINUS (SImode, xop0, xop1);
3605 if (flag_pic)
3607 /* We need to find and carefully transform any SYMBOL and LABEL
3608 references, so go back to the original address expression. */
3609 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3611 if (new_x != orig_x)
3612 x = new_x;
3615 return x;
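/* Illustrative case for the offset splitting above (constant assumed):
   SImode (plus (reg) (const_int 0x3004)) has an out-of-range offset, so
   it is rewritten with low_n == 4 and n == 0x3000 (a valid immediate) as
       base = reg + 0x3000
       addr = (plus base 4)
   leaving a small residual offset that the load/store can encode.  */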
3619 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3620 to be legitimate. If we find one, return the new, valid address. */
3622 rtx thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3624 if (GET_CODE (x) == PLUS
3625 && GET_CODE (XEXP (x, 1)) == CONST_INT
3626 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3627 || INTVAL (XEXP (x, 1)) < 0))
3629 rtx xop0 = XEXP (x, 0);
3630 rtx xop1 = XEXP (x, 1);
3631 HOST_WIDE_INT offset = INTVAL (xop1);
3633 /* Try and fold the offset into a biasing of the base register and
3634 then offsetting that. Don't do this when optimizing for space
3635 since it can cause too many CSEs. */
3636 if (optimize_size && offset >= 0
3637 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3639 HOST_WIDE_INT delta;
3641 if (offset >= 256)
3642 delta = offset - (256 - GET_MODE_SIZE (mode));
3643 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3644 delta = 31 * GET_MODE_SIZE (mode);
3645 else
3646 delta = offset & (~31 * GET_MODE_SIZE (mode));
3648 xop0 = force_operand (plus_constant (xop0, offset - delta),
3649 NULL_RTX);
3650 x = plus_constant (xop0, delta);
3652 else if (offset < 0 && offset > -256)
3653 /* Small negative offsets are best done with a subtract before the
3654 dereference; forcing these into a register normally takes two
3655 instructions. */
3656 x = force_operand (x, NULL_RTX);
3657 else
3659 /* For the remaining cases, force the constant into a register. */
3660 xop1 = force_reg (SImode, xop1);
3661 x = gen_rtx_PLUS (SImode, xop0, xop1);
3664 else if (GET_CODE (x) == PLUS
3665 && s_register_operand (XEXP (x, 1), SImode)
3666 && !s_register_operand (XEXP (x, 0), SImode))
3668 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3670 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3673 if (flag_pic)
3675 /* We need to find and carefully transform any SYMBOL and LABEL
3676 references, so go back to the original address expression. */
3677 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3679 if (new_x != orig_x)
3680 x = new_x;
3683 return x;
3688 #define REG_OR_SUBREG_REG(X) \
3689 (GET_CODE (X) == REG \
3690 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3692 #define REG_OR_SUBREG_RTX(X) \
3693 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3695 #ifndef COSTS_N_INSNS
3696 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3697 #endif
3698 static inline int
3699 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3701 enum machine_mode mode = GET_MODE (x);
3703 switch (code)
3705 case ASHIFT:
3706 case ASHIFTRT:
3707 case LSHIFTRT:
3708 case ROTATERT:
3709 case PLUS:
3710 case MINUS:
3711 case COMPARE:
3712 case NEG:
3713 case NOT:
3714 return COSTS_N_INSNS (1);
3716 case MULT:
3717 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3719 int cycles = 0;
3720 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3722 while (i)
3724 i >>= 2;
3725 cycles++;
3727 return COSTS_N_INSNS (2) + cycles;
3729 return COSTS_N_INSNS (1) + 16;
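/* The loop above charges one cycle per two bits of the constant
   multiplier: e.g. i == 0x55 shifts 0x55 -> 0x15 -> 0x5 -> 0x1 -> 0,
   four iterations, for a cost of COSTS_N_INSNS (2) + 4.  A non-constant
   multiplier gets the flat COSTS_N_INSNS (1) + 16.  */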
3731 case SET:
3732 return (COSTS_N_INSNS (1)
3733 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3734 + (GET_CODE (SET_DEST (x)) == MEM)));
3736 case CONST_INT:
3737 if (outer == SET)
3739 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3740 return 0;
3741 if (thumb_shiftable_const (INTVAL (x)))
3742 return COSTS_N_INSNS (2);
3743 return COSTS_N_INSNS (3);
3745 else if ((outer == PLUS || outer == COMPARE)
3746 && INTVAL (x) < 256 && INTVAL (x) > -256)
3747 return 0;
3748 else if (outer == AND
3749 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3750 return COSTS_N_INSNS (1);
3751 else if (outer == ASHIFT || outer == ASHIFTRT
3752 || outer == LSHIFTRT)
3753 return 0;
3754 return COSTS_N_INSNS (2);
3756 case CONST:
3757 case CONST_DOUBLE:
3758 case LABEL_REF:
3759 case SYMBOL_REF:
3760 return COSTS_N_INSNS (3);
3762 case UDIV:
3763 case UMOD:
3764 case DIV:
3765 case MOD:
3766 return 100;
3768 case TRUNCATE:
3769 return 99;
3771 case AND:
3772 case XOR:
3773 case IOR:
3774 /* XXX guess. */
3775 return 8;
3777 case MEM:
3778 /* XXX another guess. */
3779 /* Memory costs quite a lot for the first word, but subsequent words
3780 load at the equivalent of a single insn each. */
3781 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3782 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3783 ? 4 : 0));
3785 case IF_THEN_ELSE:
3786 /* XXX a guess. */
3787 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3788 return 14;
3789 return 2;
3791 case ZERO_EXTEND:
3792 /* XXX still guessing. */
3793 switch (GET_MODE (XEXP (x, 0)))
3795 case QImode:
3796 return (1 + (mode == DImode ? 4 : 0)
3797 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3799 case HImode:
3800 return (4 + (mode == DImode ? 4 : 0)
3801 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3803 case SImode:
3804 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3806 default:
3807 return 99;
3810 default:
3811 return 99;
3816 /* Worker routine for arm_rtx_costs. */
3817 static inline int
3818 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3820 enum machine_mode mode = GET_MODE (x);
3821 enum rtx_code subcode;
3822 int extra_cost;
3824 switch (code)
3826 case MEM:
3827 /* Memory costs quite a lot for the first word, but subsequent words
3828 load at the equivalent of a single insn each. */
3829 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3830 + (GET_CODE (x) == SYMBOL_REF
3831 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3833 case DIV:
3834 case MOD:
3835 case UDIV:
3836 case UMOD:
3837 return optimize_size ? COSTS_N_INSNS (2) : 100;
3839 case ROTATE:
3840 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3841 return 4;
3842 /* Fall through */
3843 case ROTATERT:
3844 if (mode != SImode)
3845 return 8;
3846 /* Fall through */
3847 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3848 if (mode == DImode)
3849 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3850 + ((GET_CODE (XEXP (x, 0)) == REG
3851 || (GET_CODE (XEXP (x, 0)) == SUBREG
3852 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3853 ? 0 : 8));
3854 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3855 || (GET_CODE (XEXP (x, 0)) == SUBREG
3856 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3857 ? 0 : 4)
3858 + ((GET_CODE (XEXP (x, 1)) == REG
3859 || (GET_CODE (XEXP (x, 1)) == SUBREG
3860 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3861 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3862 ? 0 : 4));
3864 case MINUS:
3865 if (mode == DImode)
3866 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3867 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3868 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3869 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3870 ? 0 : 8));
3872 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3873 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3874 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3875 && arm_const_double_rtx (XEXP (x, 1))))
3876 ? 0 : 8)
3877 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3878 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3879 && arm_const_double_rtx (XEXP (x, 0))))
3880 ? 0 : 8));
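/* A subtraction whose second operand is a shift, a rotate, or a
   multiply by a power of two (the (i & (i - 1)) == 0 test below) can
   use the barrel shifter on that operand and so costs a single insn. */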
3882 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3883 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3884 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3885 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3886 || subcode == ASHIFTRT || subcode == LSHIFTRT
3887 || subcode == ROTATE || subcode == ROTATERT
3888 || (subcode == MULT
3889 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3890 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3891 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3892 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3893 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3894 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3895 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3896 return 1;
3897 /* Fall through */
3899 case PLUS:
3900 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3902 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3903 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3904 && arm_const_double_rtx (XEXP (x, 1))))
3905 ? 0 : 8));
3907 /* Fall through */
3908 case AND: case XOR: case IOR:
3909 extra_cost = 0;
3911 /* Normally the frame registers will be split into reg+const during
3912 reload, so it is a bad idea to combine them with other instructions,
3913 since then they might not be moved outside of loops. As a compromise
3914 we allow integration with ops that have a constant as their second
3915 operand. */
3916 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3917 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3918 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3919 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3920 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3921 extra_cost = 4;
3923 if (mode == DImode)
3924 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3925 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3926 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3927 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3928 ? 0 : 8));
3930 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3931 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3932 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3933 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3934 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3935 ? 0 : 4));
3937 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3938 return (1 + extra_cost
3939 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3940 || subcode == LSHIFTRT || subcode == ASHIFTRT
3941 || subcode == ROTATE || subcode == ROTATERT
3942 || (subcode == MULT
3943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3944 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3945 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3946 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3947 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3948 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3949 ? 0 : 4));
3951 return 8;
3953 case MULT:
3954 /* This should have been handled by the CPU specific routines. */
3955 abort ();
3957 case TRUNCATE:
3958 if (arm_arch3m && mode == SImode
3959 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3960 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3961 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3962 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3963 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3964 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3965 return 8;
3966 return 99;
3968 case NEG:
3969 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3970 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3971 /* Fall through */
3972 case NOT:
3973 if (mode == DImode)
3974 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3976 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3978 case IF_THEN_ELSE:
3979 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3980 return 14;
3981 return 2;
3983 case COMPARE:
3984 return 1;
3986 case ABS:
3987 return 4 + (mode == DImode ? 4 : 0);
3989 case SIGN_EXTEND:
3990 if (GET_MODE (XEXP (x, 0)) == QImode)
3991 return (4 + (mode == DImode ? 4 : 0)
3992 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3993 /* Fall through */
3994 case ZERO_EXTEND:
3995 switch (GET_MODE (XEXP (x, 0)))
3997 case QImode:
3998 return (1 + (mode == DImode ? 4 : 0)
3999 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4001 case HImode:
4002 return (4 + (mode == DImode ? 4 : 0)
4003 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4005 case SImode:
4006 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4008 case V8QImode:
4009 case V4HImode:
4010 case V2SImode:
4011 case V4QImode:
4012 case V2HImode:
4013 return 1;
4015 default:
4016 break;
4018 abort ();
4020 case CONST_INT:
4021 if (const_ok_for_arm (INTVAL (x)))
4022 return outer == SET ? 2 : -1;
4023 else if (outer == AND
4024 && const_ok_for_arm (~INTVAL (x)))
4025 return -1;
4026 else if ((outer == COMPARE
4027 || outer == PLUS || outer == MINUS)
4028 && const_ok_for_arm (-INTVAL (x)))
4029 return -1;
4030 else
4031 return 5;
4033 case CONST:
4034 case LABEL_REF:
4035 case SYMBOL_REF:
4036 return 6;
4038 case CONST_DOUBLE:
4039 if (arm_const_double_rtx (x))
4040 return outer == SET ? 2 : -1;
4041 else if ((outer == COMPARE || outer == PLUS)
4042 && neg_const_double_rtx_ok_for_fpa (x))
4043 return -1;
4044 return 7;
4046 default:
4047 return 99;
4051 /* RTX costs when optimizing for size. */
4052 static bool
4053 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4055 enum machine_mode mode = GET_MODE (x);
4057 if (TARGET_THUMB)
4059 /* XXX TBD. For now, use the standard costs. */
4060 *total = thumb_rtx_costs (x, code, outer_code);
4061 return true;
4064 switch (code)
4066 case MEM:
4067 /* A memory access costs 1 insn if the mode is small, or the address is
4068 a single register, otherwise it costs one insn per word. */
4069 if (REG_P (XEXP (x, 0)))
4070 *total = COSTS_N_INSNS (1);
4071 else
4072 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4073 return true;
4075 case DIV:
4076 case MOD:
4077 case UDIV:
4078 case UMOD:
4079 /* Needs a libcall, so it costs about this. */
4080 *total = COSTS_N_INSNS (2);
4081 return false;
4083 case ROTATE:
4084 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4086 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4087 return true;
4089 /* Fall through */
4090 case ROTATERT:
4091 case ASHIFT:
4092 case LSHIFTRT:
4093 case ASHIFTRT:
4094 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4096 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4097 return true;
4099 else if (mode == SImode)
4101 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4102 /* Slightly disparage register shifts, but not by much. */
4103 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4104 *total += 1 + rtx_cost (XEXP (x, 1), code);
4105 return true;
4108 /* Needs a libcall. */
4109 *total = COSTS_N_INSNS (2);
4110 return false;
4112 case MINUS:
4113 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4115 *total = COSTS_N_INSNS (1);
4116 return false;
4119 if (mode == SImode)
4121 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4122 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4124 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4125 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4126 || subcode1 == ROTATE || subcode1 == ROTATERT
4127 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4128 || subcode1 == ASHIFTRT)
4130 /* It's just the cost of the two operands. */
4131 *total = 0;
4132 return false;
4135 *total = COSTS_N_INSNS (1);
4136 return false;
4139 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4140 return false;
4142 case PLUS:
4143 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4145 *total = COSTS_N_INSNS (1);
4146 return false;
4149 /* Fall through */
4150 case AND: case XOR: case IOR:
4151 if (mode == SImode)
4153 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4155 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4156 || subcode == LSHIFTRT || subcode == ASHIFTRT
4157 || (code == AND && subcode == NOT))
4159 /* It's just the cost of the two operands. */
4160 *total = 0;
4161 return false;
4165 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4166 return false;
4168 case MULT:
4169 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4170 return false;
4172 case NEG:
4173 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4174 { *total = COSTS_N_INSNS (1); return false; }
4175 /* Fall through */
4176 case NOT:
4177 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4179 return false;
4181 case IF_THEN_ELSE:
4182 *total = 0;
4183 return false;
4185 case COMPARE:
4186 if (cc_register (XEXP (x, 0), VOIDmode))
4187 *total = 0;
4188 else
4189 *total = COSTS_N_INSNS (1);
4190 return false;
4192 case ABS:
4193 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4194 *total = COSTS_N_INSNS (1);
4195 else
4196 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4197 return false;
4199 case SIGN_EXTEND:
4200 *total = 0;
4201 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4203 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4204 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4206 if (mode == DImode)
4207 *total += COSTS_N_INSNS (1);
4208 return false;
4210 case ZERO_EXTEND:
4211 *total = 0;
4212 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4214 switch (GET_MODE (XEXP (x, 0)))
4216 case QImode:
4217 *total += COSTS_N_INSNS (1);
4218 break;
4220 case HImode:
4221 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
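/* Fall through to the break below; SImode needs no extra insn. */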
4223 case SImode:
4224 break;
4226 default:
4227 *total += COSTS_N_INSNS (2);
4231 if (mode == DImode)
4232 *total += COSTS_N_INSNS (1);
4234 return false;
4236 case CONST_INT:
4237 if (const_ok_for_arm (INTVAL (x)))
4238 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4239 else if (const_ok_for_arm (~INTVAL (x)))
4240 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4241 else if (const_ok_for_arm (-INTVAL (x)))
4243 if (outer_code == COMPARE || outer_code == PLUS
4244 || outer_code == MINUS)
4245 *total = 0;
4246 else
4247 *total = COSTS_N_INSNS (1);
4249 else
4250 *total = COSTS_N_INSNS (2);
4251 return true;
4253 case CONST:
4254 case LABEL_REF:
4255 case SYMBOL_REF:
4256 *total = COSTS_N_INSNS (2);
4257 return true;
4259 case CONST_DOUBLE:
4260 *total = COSTS_N_INSNS (4);
4261 return true;
4263 default:
4264 if (mode != VOIDmode)
4265 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4266 else
4267 *total = COSTS_N_INSNS (4); /* Who knows? */
4268 return false;
4272 /* RTX costs for cores with a slow MUL implementation. */
4274 static bool
4275 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4277 enum machine_mode mode = GET_MODE (x);
4279 if (TARGET_THUMB)
4281 *total = thumb_rtx_costs (x, code, outer_code);
4282 return true;
4285 switch (code)
4287 case MULT:
4288 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4289 || mode == DImode)
4291 *total = 30;
4292 return true;
4295 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4297 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4298 & (unsigned HOST_WIDE_INT) 0xffffffff);
4299 int cost, const_ok = const_ok_for_arm (i);
4300 int j, booth_unit_size;
4302 /* Tune as appropriate. */
4303 cost = const_ok ? 4 : 8;
4304 booth_unit_size = 2;
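/* Model a slow multiplier that retires two bits of the constant per
   Booth step, at roughly two cycles a step. */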
4305 for (j = 0; i && j < 32; j += booth_unit_size)
4307 i >>= booth_unit_size;
4308 cost += 2;
4311 *total = cost;
4312 return true;
4315 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4316 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4317 return true;
4319 default:
4320 *total = arm_rtx_costs_1 (x, code, outer_code);
4321 return true;
4326 /* RTX cost for cores with a fast multiply unit (M variants). */
4328 static bool
4329 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4331 enum machine_mode mode = GET_MODE (x);
4333 if (TARGET_THUMB)
4335 *total = thumb_rtx_costs (x, code, outer_code);
4336 return true;
4339 switch (code)
4341 case MULT:
4342 /* There is no point basing this on the tuning, since it is always the
4343 fast variant if it exists at all. */
4344 if (mode == DImode
4345 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4346 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4347 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4349 *total = 8;
4350 return true;
4354 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4355 || mode == DImode)
4357 *total = 30;
4358 return true;
4361 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4363 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4364 & (unsigned HOST_WIDE_INT) 0xffffffff);
4365 int cost, const_ok = const_ok_for_arm (i);
4366 int j, booth_unit_size;
4368 /* Tune as appropriate. */
4369 cost = const_ok ? 4 : 8;
4370 booth_unit_size = 8;
4371 for (j = 0; i && j < 32; j += booth_unit_size)
4373 i >>= booth_unit_size;
4374 cost += 2;
4377 *total = cost;
4378 return true;
4381 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4382 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4383 return true;
4385 default:
4386 *total = arm_rtx_costs_1 (x, code, outer_code);
4387 return true;
4392 /* RTX cost for XScale CPUs. */
4394 static bool
4395 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4397 enum machine_mode mode = GET_MODE (x);
4399 if (TARGET_THUMB)
4401 *total = thumb_rtx_costs (x, code, outer_code);
4402 return true;
4405 switch (code)
4407 case MULT:
4408 /* There is no point basing this on the tuning, since it is always the
4409 fast variant if it exists at all. */
4410 if (mode == DImode
4411 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4412 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4413 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4415 *total = 8;
4416 return true;
4420 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4421 || mode == DImode)
4423 *total = 30;
4424 return true;
4427 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4429 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4430 & (unsigned HOST_WIDE_INT) 0xffffffff);
4431 int cost, const_ok = const_ok_for_arm (i);
4432 unsigned HOST_WIDE_INT masked_const;
4434 /* The cost will be related to two insns.
4435 First a load of the constant (MOV or LDR), then a multiply. */
4436 cost = 2;
4437 if (! const_ok)
4438 cost += 1; /* LDR is probably more expensive because
4439 of longer result latency. */
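/* The XScale multiplier appears to terminate early for small operands:
   constants fitting in roughly 16 signed bits cost nothing extra, those
   within about 28 bits one more cycle, and anything larger two. */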
4440 masked_const = i & 0xffff8000;
4441 if (masked_const != 0 && masked_const != 0xffff8000)
4443 masked_const = i & 0xf8000000;
4444 if (masked_const == 0 || masked_const == 0xf8000000)
4445 cost += 1;
4446 else
4447 cost += 2;
4449 *total = cost;
4450 return true;
4453 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4454 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4455 return true;
4457 case COMPARE:
4458 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4459 will stall until the multiplication is complete. */
4460 if (GET_CODE (XEXP (x, 0)) == MULT)
4461 *total = 4 + rtx_cost (XEXP (x, 0), code);
4462 else
4463 *total = arm_rtx_costs_1 (x, code, outer_code);
4464 return true;
4466 default:
4467 *total = arm_rtx_costs_1 (x, code, outer_code);
4468 return true;
4473 /* RTX costs for 9e (and later) cores. */
4475 static bool
4476 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4478 enum machine_mode mode = GET_MODE (x);
4479 int nonreg_cost;
4480 int cost;
4482 if (TARGET_THUMB)
4484 switch (code)
4486 case MULT:
4487 *total = COSTS_N_INSNS (3);
4488 return true;
4490 default:
4491 *total = thumb_rtx_costs (x, code, outer_code);
4492 return true;
4496 switch (code)
4498 case MULT:
4499 /* There is no point basing this on the tuning, since it is always the
4500 fast variant if it exists at all. */
4501 if (mode == DImode
4502 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4503 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4504 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4506 *total = 3;
4507 return true;
4511 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4513 *total = 30;
4514 return true;
4516 if (mode == DImode)
4518 cost = 7;
4519 nonreg_cost = 8;
4521 else
4523 cost = 2;
4524 nonreg_cost = 4;
4528 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4529 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4530 return true;
4532 default:
4533 *total = arm_rtx_costs_1 (x, code, outer_code);
4534 return true;
4537 /* All address computations that can be done are free, but rtx cost returns
4538 the same for practically all of them. So we weight the different types
4539 of address here in the order (most preferred first):
4540 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4541 static inline int
4542 arm_arm_address_cost (rtx x)
4544 enum rtx_code c = GET_CODE (x);
4546 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4547 return 0;
4548 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4549 return 10;
4551 if (c == PLUS || c == MINUS)
4553 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4554 return 2;
4556 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4557 return 3;
4559 return 4;
4562 return 6;
4565 static inline int
4566 arm_thumb_address_cost (rtx x)
4568 enum rtx_code c = GET_CODE (x);
4570 if (c == REG)
4571 return 1;
4572 if (c == PLUS
4573 && GET_CODE (XEXP (x, 0)) == REG
4574 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4575 return 1;
4577 return 2;
4580 static int
4581 arm_address_cost (rtx x)
4583 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
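/* Adjust the cost of the dependency LINK between INSN and DEP; the
   scheduler calls this to refine COST, and a smaller return value means
   the two insns may issue closer together. */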
4586 static int
4587 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4589 rtx i_pat, d_pat;
4591 /* Some true dependencies can have a higher cost depending
4592 on precisely how certain input operands are used. */
4593 if (arm_tune_xscale
4594 && REG_NOTE_KIND (link) == 0
4595 && recog_memoized (insn) >= 0
4596 && recog_memoized (dep) >= 0)
4598 int shift_opnum = get_attr_shift (insn);
4599 enum attr_type attr_type = get_attr_type (dep);
4601 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4602 operand for INSN. If we have a shifted input operand and the
4603 instruction we depend on is another ALU instruction, then we may
4604 have to account for an additional stall. */
4605 if (shift_opnum != 0
4606 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4608 rtx shifted_operand;
4609 int opno;
4611 /* Get the shifted operand. */
4612 extract_insn (insn);
4613 shifted_operand = recog_data.operand[shift_opnum];
4615 /* Iterate over all the operands in DEP. If we write an operand
4616 that overlaps with SHIFTED_OPERAND, then we have to increase the
4617 cost of this dependency. */
4618 extract_insn (dep);
4619 preprocess_constraints ();
4620 for (opno = 0; opno < recog_data.n_operands; opno++)
4622 /* We can ignore strict inputs. */
4623 if (recog_data.operand_type[opno] == OP_IN)
4624 continue;
4626 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4627 shifted_operand))
4628 return 2;
4633 /* XXX This is not strictly true for the FPA. */
4634 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4635 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4636 return 0;
4638 /* Call insns don't incur a stall, even if they follow a load. */
4639 if (REG_NOTE_KIND (link) == 0
4640 && GET_CODE (insn) == CALL_INSN)
4641 return 1;
4643 if ((i_pat = single_set (insn)) != NULL
4644 && GET_CODE (SET_SRC (i_pat)) == MEM
4645 && (d_pat = single_set (dep)) != NULL
4646 && GET_CODE (SET_DEST (d_pat)) == MEM)
4648 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4649 /* This is a load after a store; there is no conflict if the load reads
4650 from a cached area. Assume that loads from the stack, and from the
4651 constant pool are cached, and that others will miss. This is a
4652 hack. */
4654 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4655 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4656 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4657 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4658 return 1;
4661 return cost;
4664 static int fp_consts_inited = 0;
4666 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4667 static const char * const strings_fp[8] =
4669 "0", "1", "2", "3",
4670 "4", "5", "0.5", "10"
4673 static REAL_VALUE_TYPE values_fp[8];
4675 static void
4676 init_fp_table (void)
4678 int i;
4679 REAL_VALUE_TYPE r;
4681 if (TARGET_VFP)
4682 fp_consts_inited = 1;
4683 else
4684 fp_consts_inited = 8;
4686 for (i = 0; i < fp_consts_inited; i++)
4688 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4689 values_fp[i] = r;
4693 /* Return TRUE if rtx X is a valid immediate FP constant. */
4694 int
4695 arm_const_double_rtx (rtx x)
4697 REAL_VALUE_TYPE r;
4698 int i;
4700 if (!fp_consts_inited)
4701 init_fp_table ();
4703 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4704 if (REAL_VALUE_MINUS_ZERO (r))
4705 return 0;
4707 for (i = 0; i < fp_consts_inited; i++)
4708 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4709 return 1;
4711 return 0;
4714 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4715 int
4716 neg_const_double_rtx_ok_for_fpa (rtx x)
4718 REAL_VALUE_TYPE r;
4719 int i;
4721 if (!fp_consts_inited)
4722 init_fp_table ();
4724 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4725 r = REAL_VALUE_NEGATE (r);
4726 if (REAL_VALUE_MINUS_ZERO (r))
4727 return 0;
4729 for (i = 0; i < 8; i++)
4730 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4731 return 1;
4733 return 0;
4736 /* Predicates for `match_operand' and `match_operator'. */
4738 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4739 int
4740 cirrus_memory_offset (rtx op)
4742 /* Reject eliminable registers. */
4743 if (! (reload_in_progress || reload_completed)
4744 && ( reg_mentioned_p (frame_pointer_rtx, op)
4745 || reg_mentioned_p (arg_pointer_rtx, op)
4746 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4747 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4748 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4749 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4750 return 0;
4752 if (GET_CODE (op) == MEM)
4754 rtx ind;
4756 ind = XEXP (op, 0);
4758 /* Match: (mem (reg)). */
4759 if (GET_CODE (ind) == REG)
4760 return 1;
4762 /* Match:
4763 (mem (plus (reg)
4764 (const))). */
4765 if (GET_CODE (ind) == PLUS
4766 && GET_CODE (XEXP (ind, 0)) == REG
4767 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4768 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4769 return 1;
4772 return 0;
4775 /* Return TRUE if OP is a valid VFP memory address pattern.
4776 WB is true if writeback address modes are allowed. */
4778 int
4779 arm_coproc_mem_operand (rtx op, bool wb)
4781 rtx ind;
4783 /* Reject eliminable registers. */
4784 if (! (reload_in_progress || reload_completed)
4785 && ( reg_mentioned_p (frame_pointer_rtx, op)
4786 || reg_mentioned_p (arg_pointer_rtx, op)
4787 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4788 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4789 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4790 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4791 return FALSE;
4793 /* Constants are converted into offsets from labels. */
4794 if (GET_CODE (op) != MEM)
4795 return FALSE;
4797 ind = XEXP (op, 0);
4799 if (reload_completed
4800 && (GET_CODE (ind) == LABEL_REF
4801 || (GET_CODE (ind) == CONST
4802 && GET_CODE (XEXP (ind, 0)) == PLUS
4803 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4804 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4805 return TRUE;
4807 /* Match: (mem (reg)). */
4808 if (GET_CODE (ind) == REG)
4809 return arm_address_register_rtx_p (ind, 0);
4811 /* Autoincrement addressing modes. */
4812 if (wb
4813 && (GET_CODE (ind) == PRE_INC
4814 || GET_CODE (ind) == POST_INC
4815 || GET_CODE (ind) == PRE_DEC
4816 || GET_CODE (ind) == POST_DEC))
4817 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4819 if (wb
4820 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4821 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4822 && GET_CODE (XEXP (ind, 1)) == PLUS
4823 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4824 ind = XEXP (ind, 1);
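/* Beyond this point the remaining form is reg + small const.  The
   window below (multiples of 4 in [-1020, +1020]) appears to match a
   coprocessor offset field of 8 bits scaled by 4. */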
4826 /* Match:
4827 (plus (reg)
4828 (const)). */
4829 if (GET_CODE (ind) == PLUS
4830 && GET_CODE (XEXP (ind, 0)) == REG
4831 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4832 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4833 && INTVAL (XEXP (ind, 1)) > -1024
4834 && INTVAL (XEXP (ind, 1)) < 1024
4835 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4836 return TRUE;
4838 return FALSE;
4841 /* Return true if X is a register that will be eliminated later on. */
4842 int
4843 arm_eliminable_register (rtx x)
4845 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4846 || REGNO (x) == ARG_POINTER_REGNUM
4847 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4848 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4851 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4852 VFP registers. Otherwise return NO_REGS. */
4854 enum reg_class
4855 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4857 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4858 return NO_REGS;
4860 return GENERAL_REGS;
4864 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4865 Used by the Cirrus Maverick code, which has to work around
4866 a hardware bug triggered by such instructions. */
4867 static bool
4868 arm_memory_load_p (rtx insn)
4870 rtx body, lhs, rhs;
4872 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4873 return false;
4875 body = PATTERN (insn);
4877 if (GET_CODE (body) != SET)
4878 return false;
4880 lhs = XEXP (body, 0);
4881 rhs = XEXP (body, 1);
4883 lhs = REG_OR_SUBREG_RTX (lhs);
4885 /* If the destination is not a general purpose
4886 register we do not have to worry. */
4887 if (GET_CODE (lhs) != REG
4888 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4889 return false;
4891 /* As well as loads from memory we also have to react
4892 to loads of invalid constants which will be turned
4893 into loads from the minipool. */
4894 return (GET_CODE (rhs) == MEM
4895 || GET_CODE (rhs) == SYMBOL_REF
4896 || note_invalid_constants (insn, -1, false));
4899 /* Return TRUE if INSN is a Cirrus instruction. */
4900 static bool
4901 arm_cirrus_insn_p (rtx insn)
4903 enum attr_cirrus attr;
4905 /* get_attr aborts on USE and CLOBBER. */
4906 if (!insn
4907 || GET_CODE (insn) != INSN
4908 || GET_CODE (PATTERN (insn)) == USE
4909 || GET_CODE (PATTERN (insn)) == CLOBBER)
4910 return 0;
4912 attr = get_attr_cirrus (insn);
4914 return attr != CIRRUS_NOT;
4917 /* Cirrus reorg for invalid instruction combinations. */
4918 static void
4919 cirrus_reorg (rtx first)
4921 enum attr_cirrus attr;
4922 rtx body = PATTERN (first);
4923 rtx t;
4924 int nops;
4926 /* Any branch must be followed by 2 non-Cirrus instructions. */
4927 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4929 nops = 0;
4930 t = next_nonnote_insn (first);
4932 if (arm_cirrus_insn_p (t))
4933 ++ nops;
4935 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4936 ++ nops;
4938 while (nops --)
4939 emit_insn_after (gen_nop (), first);
4941 return;
4944 /* (float (blah)) is in parallel with a clobber. */
4945 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4946 body = XVECEXP (body, 0, 0);
4948 if (GET_CODE (body) == SET)
4950 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4952 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4953 be followed by a non-Cirrus insn. */
4954 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4956 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4957 emit_insn_after (gen_nop (), first);
4959 return;
4961 else if (arm_memory_load_p (first))
4963 unsigned int arm_regno;
4965 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4966 ldr/cfmv64hr combination where the Rd field is the same
4967 in both instructions must be split with a non Cirrus
4968 insn. Example:
4970 ldr r0, blah
4972 cfmvsr mvf0, r0. */
4974 /* Get Arm register number for ldr insn. */
4975 if (GET_CODE (lhs) == REG)
4976 arm_regno = REGNO (lhs);
4977 else if (GET_CODE (rhs) == REG)
4978 arm_regno = REGNO (rhs);
4979 else
4980 abort ();
4982 /* Next insn. */
4983 first = next_nonnote_insn (first);
4985 if (! arm_cirrus_insn_p (first))
4986 return;
4988 body = PATTERN (first);
4990 /* (float (blah)) is in parallel with a clobber. */
4991 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4992 body = XVECEXP (body, 0, 0);
4994 if (GET_CODE (body) == FLOAT)
4995 body = XEXP (body, 0);
4997 if (get_attr_cirrus (first) == CIRRUS_MOVE
4998 && GET_CODE (XEXP (body, 1)) == REG
4999 && arm_regno == REGNO (XEXP (body, 1)))
5000 emit_insn_after (gen_nop (), first);
5002 return;
5006 /* get_attr aborts on USE and CLOBBER. */
5007 if (!first
5008 || GET_CODE (first) != INSN
5009 || GET_CODE (PATTERN (first)) == USE
5010 || GET_CODE (PATTERN (first)) == CLOBBER)
5011 return;
5013 attr = get_attr_cirrus (first);
5015 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5016 must be followed by a non-coprocessor instruction. */
5017 if (attr == CIRRUS_COMPARE)
5019 nops = 0;
5021 t = next_nonnote_insn (first);
5023 if (arm_cirrus_insn_p (t))
5024 ++ nops;
5026 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5027 ++ nops;
5029 while (nops --)
5030 emit_insn_after (gen_nop (), first);
5032 return;
5036 /* Return TRUE if X references a SYMBOL_REF. */
5037 int
5038 symbol_mentioned_p (rtx x)
5040 const char * fmt;
5041 int i;
5043 if (GET_CODE (x) == SYMBOL_REF)
5044 return 1;
5046 fmt = GET_RTX_FORMAT (GET_CODE (x));
5048 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5050 if (fmt[i] == 'E')
5052 int j;
5054 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5055 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5056 return 1;
5058 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5059 return 1;
5062 return 0;
5065 /* Return TRUE if X references a LABEL_REF. */
5066 int
5067 label_mentioned_p (rtx x)
5069 const char * fmt;
5070 int i;
5072 if (GET_CODE (x) == LABEL_REF)
5073 return 1;
5075 fmt = GET_RTX_FORMAT (GET_CODE (x));
5076 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5078 if (fmt[i] == 'E')
5080 int j;
5082 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5083 if (label_mentioned_p (XVECEXP (x, i, j)))
5084 return 1;
5086 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5087 return 1;
5090 return 0;
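/* Map a min/max rtx code to the comparison under which its first
   operand is selected: e.g. (smax a b) yields A when (ge a b). */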
5093 enum rtx_code
5094 minmax_code (rtx x)
5096 enum rtx_code code = GET_CODE (x);
5098 if (code == SMAX)
5099 return GE;
5100 else if (code == SMIN)
5101 return LE;
5102 else if (code == UMIN)
5103 return LEU;
5104 else if (code == UMAX)
5105 return GEU;
5107 abort ();
5110 /* Return 1 if memory locations are adjacent. */
5111 int
5112 adjacent_mem_locations (rtx a, rtx b)
5114 if ((GET_CODE (XEXP (a, 0)) == REG
5115 || (GET_CODE (XEXP (a, 0)) == PLUS
5116 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5117 && (GET_CODE (XEXP (b, 0)) == REG
5118 || (GET_CODE (XEXP (b, 0)) == PLUS
5119 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5121 HOST_WIDE_INT val0 = 0, val1 = 0;
5122 rtx reg0, reg1;
5123 int val_diff;
5125 if (GET_CODE (XEXP (a, 0)) == PLUS)
5127 reg0 = XEXP (XEXP (a, 0), 0);
5128 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5130 else
5131 reg0 = XEXP (a, 0);
5133 if (GET_CODE (XEXP (b, 0)) == PLUS)
5135 reg1 = XEXP (XEXP (b, 0), 0);
5136 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5138 else
5139 reg1 = XEXP (b, 0);
5141 /* Don't accept any offset that will require multiple
5142 instructions to handle, since this would cause the
5143 arith_adjacentmem pattern to output an overlong sequence. */
5144 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5145 return 0;
5147 /* Don't allow an eliminable register: register elimination can make
5148 the offset too large. */
5149 if (arm_eliminable_register (reg0))
5150 return 0;
5152 val_diff = val1 - val0;
5153 return ((REGNO (reg0) == REGNO (reg1))
5154 && (val_diff == 4 || val_diff == -4));
5157 return 0;
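/* Analyze a candidate load-multiple; return 0 if the accesses cannot
   be combined, 1-4 for ldmia/ldmib/ldmda/ldmdb respectively, or 5 when
   the base must first be adjusted with an add or sub (see the matching
   cases in emit_ldm_seq below). */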
5160 int
5161 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5162 HOST_WIDE_INT *load_offset)
5164 int unsorted_regs[4];
5165 HOST_WIDE_INT unsorted_offsets[4];
5166 int order[4];
5167 int base_reg = -1;
5168 int i;
5170 /* Can only handle 2, 3, or 4 insns at present,
5171 though could be easily extended if required. */
5172 if (nops < 2 || nops > 4)
5173 abort ();
5175 /* Loop over the operands and check that the memory references are
5176 suitable (i.e. immediate offsets from the same base register). At
5177 the same time, extract the target register, and the memory
5178 offsets. */
5179 for (i = 0; i < nops; i++)
5181 rtx reg;
5182 rtx offset;
5184 /* Convert a subreg of a mem into the mem itself. */
5185 if (GET_CODE (operands[nops + i]) == SUBREG)
5186 operands[nops + i] = alter_subreg (operands + (nops + i));
5188 if (GET_CODE (operands[nops + i]) != MEM)
5189 abort ();
5191 /* Don't reorder volatile memory references; it doesn't seem worth
5192 looking for the case where the order is ok anyway. */
5193 if (MEM_VOLATILE_P (operands[nops + i]))
5194 return 0;
5196 offset = const0_rtx;
5198 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5199 || (GET_CODE (reg) == SUBREG
5200 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5201 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5202 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5203 == REG)
5204 || (GET_CODE (reg) == SUBREG
5205 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5206 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5207 == CONST_INT)))
5209 if (i == 0)
5211 base_reg = REGNO (reg);
5212 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5213 ? REGNO (operands[i])
5214 : REGNO (SUBREG_REG (operands[i])));
5215 order[0] = 0;
5217 else
5219 if (base_reg != (int) REGNO (reg))
5220 /* Not addressed from the same base register. */
5221 return 0;
5223 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5224 ? REGNO (operands[i])
5225 : REGNO (SUBREG_REG (operands[i])));
5226 if (unsorted_regs[i] < unsorted_regs[order[0]])
5227 order[0] = i;
5230 /* If it isn't an integer register, or if it overwrites the
5231 base register but isn't the last insn in the list, then
5232 we can't do this. */
5233 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5234 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5235 return 0;
5237 unsorted_offsets[i] = INTVAL (offset);
5239 else
5240 /* Not a suitable memory address. */
5241 return 0;
5244 /* All the useful information has now been extracted from the
5245 operands into unsorted_regs and unsorted_offsets; additionally,
5246 order[0] has been set to the lowest numbered register in the
5247 list. Sort the registers into order, and check that the memory
5248 offsets are ascending and adjacent. */
5250 for (i = 1; i < nops; i++)
5252 int j;
5254 order[i] = order[i - 1];
5255 for (j = 0; j < nops; j++)
5256 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5257 && (order[i] == order[i - 1]
5258 || unsorted_regs[j] < unsorted_regs[order[i]]))
5259 order[i] = j;
5261 /* Have we found a suitable register?  If not, one must be used more
5262 than once. */
5263 if (order[i] == order[i - 1])
5264 return 0;
5266 /* Is the memory address adjacent and ascending? */
5267 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5268 return 0;
5271 if (base)
5273 *base = base_reg;
5275 for (i = 0; i < nops; i++)
5276 regs[i] = unsorted_regs[order[i]];
5278 *load_offset = unsorted_offsets[order[0]];
5281 if (unsorted_offsets[order[0]] == 0)
5282 return 1; /* ldmia */
5284 if (unsorted_offsets[order[0]] == 4)
5285 return 2; /* ldmib */
5287 if (unsorted_offsets[order[nops - 1]] == 0)
5288 return 3; /* ldmda */
5290 if (unsorted_offsets[order[nops - 1]] == -4)
5291 return 4; /* ldmdb */
5293 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5294 if the offset isn't small enough. The reason 2 ldrs are faster
5295 is because these ARMs are able to do more than one cache access
5296 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5297 whilst the ARM8 has a double bandwidth cache. This means that
5298 these cores can do both an instruction fetch and a data fetch in
5299 a single cycle, so the trick of calculating the address into a
5300 scratch register (one of the result regs) and then doing a load
5301 multiple actually becomes slower (and no smaller in code size).
5302 That is the transformation
5304 ldr rd1, [rbase + offset]
5305 ldr rd2, [rbase + offset + 4]
5307 to
5309 add rd1, rbase, offset
5310 ldmia rd1, {rd1, rd2}
5312 produces worse code -- '3 cycles + any stalls on rd2' instead of
5313 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5314 access per cycle, the first sequence could never complete in less
5315 than 6 cycles, whereas the ldm sequence would only take 5 and
5316 would make better use of sequential accesses if not hitting the
5317 cache.
5319 We cheat here and test 'arm_ld_sched' which we currently know to
5320 only be true for the ARM8, ARM9 and StrongARM. If this ever
5321 changes, then the test below needs to be reworked. */
5322 if (nops == 2 && arm_ld_sched)
5323 return 0;
5325 /* Can't do it without setting up the offset, only do this if it takes
5326 no more than one insn. */
5327 return (const_ok_for_arm (unsorted_offsets[order[0]])
5328 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5331 const char *
5332 emit_ldm_seq (rtx *operands, int nops)
5334 int regs[4];
5335 int base_reg;
5336 HOST_WIDE_INT offset;
5337 char buf[100];
5338 int i;
5340 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5342 case 1:
5343 strcpy (buf, "ldm%?ia\t");
5344 break;
5346 case 2:
5347 strcpy (buf, "ldm%?ib\t");
5348 break;
5350 case 3:
5351 strcpy (buf, "ldm%?da\t");
5352 break;
5354 case 4:
5355 strcpy (buf, "ldm%?db\t");
5356 break;
5358 case 5:
5359 if (offset >= 0)
5360 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5361 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5362 (long) offset);
5363 else
5364 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5365 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5366 (long) -offset);
5367 output_asm_insn (buf, operands);
5368 base_reg = regs[0];
5369 strcpy (buf, "ldm%?ia\t");
5370 break;
5372 default:
5373 abort ();
5376 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5377 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5379 for (i = 1; i < nops; i++)
5380 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5381 reg_names[regs[i]]);
5383 strcat (buf, "}\t%@ phole ldm");
5385 output_asm_insn (buf, operands);
5386 return "";
5389 int
5390 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5391 HOST_WIDE_INT * load_offset)
5393 int unsorted_regs[4];
5394 HOST_WIDE_INT unsorted_offsets[4];
5395 int order[4];
5396 int base_reg = -1;
5397 int i;
5399 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5400 extended if required. */
5401 if (nops < 2 || nops > 4)
5402 abort ();
5404 /* Loop over the operands and check that the memory references are
5405 suitable (i.e. immediate offsets from the same base register). At
5406 the same time, extract the target register, and the memory
5407 offsets. */
5408 for (i = 0; i < nops; i++)
5410 rtx reg;
5411 rtx offset;
5413 /* Convert a subreg of a mem into the mem itself. */
5414 if (GET_CODE (operands[nops + i]) == SUBREG)
5415 operands[nops + i] = alter_subreg (operands + (nops + i));
5417 if (GET_CODE (operands[nops + i]) != MEM)
5418 abort ();
5420 /* Don't reorder volatile memory references; it doesn't seem worth
5421 looking for the case where the order is ok anyway. */
5422 if (MEM_VOLATILE_P (operands[nops + i]))
5423 return 0;
5425 offset = const0_rtx;
5427 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5428 || (GET_CODE (reg) == SUBREG
5429 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5430 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5431 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5432 == REG)
5433 || (GET_CODE (reg) == SUBREG
5434 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5435 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5436 == CONST_INT)))
5438 if (i == 0)
5440 base_reg = REGNO (reg);
5441 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5442 ? REGNO (operands[i])
5443 : REGNO (SUBREG_REG (operands[i])));
5444 order[0] = 0;
5446 else
5448 if (base_reg != (int) REGNO (reg))
5449 /* Not addressed from the same base register. */
5450 return 0;
5452 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5453 ? REGNO (operands[i])
5454 : REGNO (SUBREG_REG (operands[i])));
5455 if (unsorted_regs[i] < unsorted_regs[order[0]])
5456 order[0] = i;
5459 /* If it isn't an integer register, then we can't do this. */
5460 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5461 return 0;
5463 unsorted_offsets[i] = INTVAL (offset);
5465 else
5466 /* Not a suitable memory address. */
5467 return 0;
5470 /* All the useful information has now been extracted from the
5471 operands into unsorted_regs and unsorted_offsets; additionally,
5472 order[0] has been set to the lowest numbered register in the
5473 list. Sort the registers into order, and check that the memory
5474 offsets are ascending and adjacent. */
5476 for (i = 1; i < nops; i++)
5478 int j;
5480 order[i] = order[i - 1];
5481 for (j = 0; j < nops; j++)
5482 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5483 && (order[i] == order[i - 1]
5484 || unsorted_regs[j] < unsorted_regs[order[i]]))
5485 order[i] = j;
5487 /* Have we found a suitable register?  If not, one must be used more
5488 than once. */
5489 if (order[i] == order[i - 1])
5490 return 0;
5492 /* Is the memory address adjacent and ascending? */
5493 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5494 return 0;
5497 if (base)
5499 *base = base_reg;
5501 for (i = 0; i < nops; i++)
5502 regs[i] = unsorted_regs[order[i]];
5504 *load_offset = unsorted_offsets[order[0]];
5507 if (unsorted_offsets[order[0]] == 0)
5508 return 1; /* stmia */
5510 if (unsorted_offsets[order[0]] == 4)
5511 return 2; /* stmib */
5513 if (unsorted_offsets[order[nops - 1]] == 0)
5514 return 3; /* stmda */
5516 if (unsorted_offsets[order[nops - 1]] == -4)
5517 return 4; /* stmdb */
5519 return 0;
5522 const char *
5523 emit_stm_seq (rtx *operands, int nops)
5525 int regs[4];
5526 int base_reg;
5527 HOST_WIDE_INT offset;
5528 char buf[100];
5529 int i;
5531 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5533 case 1:
5534 strcpy (buf, "stm%?ia\t");
5535 break;
5537 case 2:
5538 strcpy (buf, "stm%?ib\t");
5539 break;
5541 case 3:
5542 strcpy (buf, "stm%?da\t");
5543 break;
5545 case 4:
5546 strcpy (buf, "stm%?db\t");
5547 break;
5549 default:
5550 abort ();
5553 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5554 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5556 for (i = 1; i < nops; i++)
5557 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5558 reg_names[regs[i]]);
5560 strcat (buf, "}\t%@ phole stm");
5562 output_asm_insn (buf, operands);
5563 return "";
5567 /* Routines for use in generating RTL. */
5569 rtx
5570 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5571 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5573 HOST_WIDE_INT offset = *offsetp;
5574 int i = 0, j;
5575 rtx result;
5576 int sign = up ? 1 : -1;
5577 rtx mem, addr;
5579 /* XScale has load-store double instructions, but they have stricter
5580 alignment requirements than load-store multiple, so we cannot
5581 use them.
5583 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5584 the pipeline until completion.
5586 NREGS CYCLES
5587 1 3
5588 2 4
5589 3 5
5590 4 6
5592 An ldr instruction takes 1-3 cycles, but does not block the
5593 pipeline.
5595 NREGS CYCLES
5596 1 1-3
5597 2 2-6
5598 3 3-9
5599 4 4-12
5601 Best case ldr will always win. However, the more ldr instructions
5602 we issue, the less likely we are to be able to schedule them well.
5603 Using ldr instructions also increases code size.
5605 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5606 for counts of 3 or 4 regs. */
5607 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5609 rtx seq;
5611 start_sequence ();
5613 for (i = 0; i < count; i++)
5615 addr = plus_constant (from, i * 4 * sign);
5616 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5617 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5618 offset += 4 * sign;
5621 if (write_back)
5623 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5624 *offsetp = offset;
5627 seq = get_insns ();
5628 end_sequence ();
5630 return seq;
5633 result = gen_rtx_PARALLEL (VOIDmode,
5634 rtvec_alloc (count + (write_back ? 1 : 0)));
5635 if (write_back)
5637 XVECEXP (result, 0, 0)
5638 = gen_rtx_SET (GET_MODE (from), from,
5639 plus_constant (from, count * 4 * sign));
5640 i = 1;
5641 count++;
5644 for (j = 0; i < count; i++, j++)
5646 addr = plus_constant (from, j * 4 * sign);
5647 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5648 XVECEXP (result, 0, i)
5649 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5650 offset += 4 * sign;
5653 if (write_back)
5654 *offsetp = offset;
5656 return result;
5659 rtx
5660 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5661 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5663 HOST_WIDE_INT offset = *offsetp;
5664 int i = 0, j;
5665 rtx result;
5666 int sign = up ? 1 : -1;
5667 rtx mem, addr;
5669 /* See arm_gen_load_multiple for discussion of
5670 the pros/cons of ldm/stm usage for XScale. */
5671 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5673 rtx seq;
5675 start_sequence ();
5677 for (i = 0; i < count; i++)
5679 addr = plus_constant (to, i * 4 * sign);
5680 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5681 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5682 offset += 4 * sign;
5685 if (write_back)
5687 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5688 *offsetp = offset;
5691 seq = get_insns ();
5692 end_sequence ();
5694 return seq;
5697 result = gen_rtx_PARALLEL (VOIDmode,
5698 rtvec_alloc (count + (write_back ? 1 : 0)));
5699 if (write_back)
5701 XVECEXP (result, 0, 0)
5702 = gen_rtx_SET (GET_MODE (to), to,
5703 plus_constant (to, count * 4 * sign));
5704 i = 1;
5705 count++;
5708 for (j = 0; i < count; i++, j++)
5710 addr = plus_constant (to, j * 4 * sign);
5711 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5712 XVECEXP (result, 0, i)
5713 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5714 offset += 4 * sign;
5717 if (write_back)
5718 *offsetp = offset;
5720 return result;
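/* Expand a block move whose length (operands[2]) is a compile-time
   constant of at most 64 bytes and whose alignment (operands[3]) is a
   multiple of four; bulk words go via load/store multiple and any tail
   bytes via halfword/byte stores.  Returns 1 on success, 0 if the
   operands do not qualify. */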
5723 int
5724 arm_gen_movmemqi (rtx *operands)
5726 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5727 HOST_WIDE_INT srcoffset, dstoffset;
5728 int i;
5729 rtx src, dst, srcbase, dstbase;
5730 rtx part_bytes_reg = NULL;
5731 rtx mem;
5733 if (GET_CODE (operands[2]) != CONST_INT
5734 || GET_CODE (operands[3]) != CONST_INT
5735 || INTVAL (operands[2]) > 64
5736 || INTVAL (operands[3]) & 3)
5737 return 0;
5739 dstbase = operands[0];
5740 srcbase = operands[1];
5742 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5743 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5745 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5746 out_words_to_go = INTVAL (operands[2]) / 4;
5747 last_bytes = INTVAL (operands[2]) & 3;
5748 dstoffset = srcoffset = 0;
5750 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5751 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5753 for (i = 0; in_words_to_go >= 2; i+=4)
5755 if (in_words_to_go > 4)
5756 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5757 srcbase, &srcoffset));
5758 else
5759 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5760 FALSE, srcbase, &srcoffset));
5762 if (out_words_to_go)
5764 if (out_words_to_go > 4)
5765 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5766 dstbase, &dstoffset));
5767 else if (out_words_to_go != 1)
5768 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5769 dst, TRUE,
5770 (last_bytes == 0
5771 ? FALSE : TRUE),
5772 dstbase, &dstoffset));
5773 else
5775 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5776 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5777 if (last_bytes != 0)
5779 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5780 dstoffset += 4;
5785 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5786 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5789 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5790 if (out_words_to_go)
5792 rtx sreg;
5794 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5795 sreg = copy_to_reg (mem);
5797 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5798 emit_move_insn (mem, sreg);
5799 in_words_to_go--;
5801 if (in_words_to_go) /* Sanity check */
5802 abort ();
5805 if (in_words_to_go)
5807 if (in_words_to_go < 0)
5808 abort ();
5810 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5811 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5814 if (last_bytes && part_bytes_reg == NULL)
5815 abort ();
5817 if (BYTES_BIG_ENDIAN && last_bytes)
5819 rtx tmp = gen_reg_rtx (SImode);
5821 /* The bytes we want are in the top end of the word. */
5822 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5823 GEN_INT (8 * (4 - last_bytes))));
5824 part_bytes_reg = tmp;
5826 while (last_bytes)
5828 mem = adjust_automodify_address (dstbase, QImode,
5829 plus_constant (dst, last_bytes - 1),
5830 dstoffset + last_bytes - 1);
5831 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5833 if (--last_bytes)
5835 tmp = gen_reg_rtx (SImode);
5836 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5837 part_bytes_reg = tmp;
5842 else
5844 if (last_bytes > 1)
5846 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5847 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5848 last_bytes -= 2;
5849 if (last_bytes)
5851 rtx tmp = gen_reg_rtx (SImode);
5852 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5853 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5854 part_bytes_reg = tmp;
5855 dstoffset += 2;
5859 if (last_bytes)
5861 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5862 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5866 return 1;
5869 /* Generate a memory reference for a half word, such that it will be loaded
5870 into the top 16 bits of the word. We can assume that the address is
5871 known to be alignable and of the form reg, or plus (reg, const). */
5873 rtx
5874 arm_gen_rotated_half_load (rtx memref)
5876 HOST_WIDE_INT offset = 0;
5877 rtx base = XEXP (memref, 0);
5879 if (GET_CODE (base) == PLUS)
5881 offset = INTVAL (XEXP (base, 1));
5882 base = XEXP (base, 0);
5885 /* If we aren't allowed to generate unaligned addresses, then fail. */
5886 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5887 return NULL;
5889 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5891 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5892 return base;
5894 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5897 /* Select a dominance comparison mode if possible for a test of the general
5898 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5899 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5900 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5901 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5902 In all cases OP will be either EQ or NE, but we don't need to know which
5903 here. If we are unable to support a dominance comparison we return
5904 CC mode. This will then fail to match for the RTL expressions that
5905 generate this call. */
5906 enum machine_mode
5907 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5909 enum rtx_code cond1, cond2;
5910 int swapped = 0;
5912 /* Currently we will probably get the wrong result if the individual
5913 comparisons are not simple. This also ensures that it is safe to
5914 reverse a comparison if necessary. */
5915 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5916 != CCmode)
5917 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5918 != CCmode))
5919 return CCmode;
5921 /* The if_then_else variant of this tests the second condition if the
5922 first passes, but is true if the first fails. Reverse the first
5923 condition to get a true "inclusive-or" expression. */
5924 if (cond_or == DOM_CC_NX_OR_Y)
5925 cond1 = reverse_condition (cond1);
5927 /* If the comparisons are not equal, and one doesn't dominate the other,
5928 then we can't do this. */
5929 if (cond1 != cond2
5930 && !comparison_dominates_p (cond1, cond2)
5931 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5932 return CCmode;
5934 if (swapped)
5936 enum rtx_code temp = cond1;
5937 cond1 = cond2;
5938 cond2 = temp;
5941 switch (cond1)
5943 case EQ:
5944 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5945 return CC_DEQmode;
5947 switch (cond2)
5949 case LE: return CC_DLEmode;
5950 case LEU: return CC_DLEUmode;
5951 case GE: return CC_DGEmode;
5952 case GEU: return CC_DGEUmode;
5953 default: break;
5956 break;
5958 case LT:
5959 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5960 return CC_DLTmode;
5961 if (cond2 == LE)
5962 return CC_DLEmode;
5963 if (cond2 == NE)
5964 return CC_DNEmode;
5965 break;
5967 case GT:
5968 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5969 return CC_DGTmode;
5970 if (cond2 == GE)
5971 return CC_DGEmode;
5972 if (cond2 == NE)
5973 return CC_DNEmode;
5974 break;
5976 case LTU:
5977 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5978 return CC_DLTUmode;
5979 if (cond2 == LEU)
5980 return CC_DLEUmode;
5981 if (cond2 == NE)
5982 return CC_DNEmode;
5983 break;
5985 case GTU:
5986 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5987 return CC_DGTUmode;
5988 if (cond2 == GEU)
5989 return CC_DGEUmode;
5990 if (cond2 == NE)
5991 return CC_DNEmode;
5992 break;
5994 /* The remaining cases only occur when both comparisons are the
5995 same. */
5996 case NE:
5997 return CC_DNEmode;
5999 case LE:
6000 return CC_DLEmode;
6002 case GE:
6003 return CC_DGEmode;
6005 case LEU:
6006 return CC_DLEUmode;
6008 case GEU:
6009 return CC_DGEUmode;
6011 default:
6012 break;
6015 abort ();
6018 enum machine_mode
6019 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6021 /* All floating point compares return CCFP if it is an equality
6022 comparison, and CCFPE otherwise. */
6023 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6025 switch (op)
6027 case EQ:
6028 case NE:
6029 case UNORDERED:
6030 case ORDERED:
6031 case UNLT:
6032 case UNLE:
6033 case UNGT:
6034 case UNGE:
6035 case UNEQ:
6036 case LTGT:
6037 return CCFPmode;
6039 case LT:
6040 case LE:
6041 case GT:
6042 case GE:
6043 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6044 return CCFPmode;
6045 return CCFPEmode;
6047 default:
6048 abort ();
6052 /* A compare with a shifted operand. Because of canonicalization, the
6053 comparison will have to be swapped when we emit the assembler. */
6054 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6055 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6056 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6057 || GET_CODE (x) == ROTATERT))
6058 return CC_SWPmode;
6060 /* This is a special case that is used by combine to allow a
6061 comparison of a shifted byte load to be split into a zero-extend
6062 followed by a comparison of the shifted integer (only valid for
6063 equalities and unsigned inequalities). */
6064 if (GET_MODE (x) == SImode
6065 && GET_CODE (x) == ASHIFT
6066 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6067 && GET_CODE (XEXP (x, 0)) == SUBREG
6068 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6069 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6070 && (op == EQ || op == NE
6071 || op == GEU || op == GTU || op == LTU || op == LEU)
6072 && GET_CODE (y) == CONST_INT)
6073 return CC_Zmode;
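  /* For example, (compare (ashift (subreg:SI (mem:QI addr)) 24) (const_int 0))
     tests whether the loaded byte is zero: the shift leaves the low 24 bits
     clear, so only the Z flag of the comparison is needed.  */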
6075   /* A construct for a conditional compare: if the false arm contains
6076 0, then both conditions must be true, otherwise either condition
6077 must be true. Not all conditions are possible, so CCmode is
6078 returned if it can't be done. */
6079 if (GET_CODE (x) == IF_THEN_ELSE
6080 && (XEXP (x, 2) == const0_rtx
6081 || XEXP (x, 2) == const1_rtx)
6082 && COMPARISON_P (XEXP (x, 0))
6083 && COMPARISON_P (XEXP (x, 1)))
6084 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6085 INTVAL (XEXP (x, 2)));
6087 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6088 if (GET_CODE (x) == AND
6089 && COMPARISON_P (XEXP (x, 0))
6090 && COMPARISON_P (XEXP (x, 1)))
6091 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6092 DOM_CC_X_AND_Y);
6094 if (GET_CODE (x) == IOR
6095 && COMPARISON_P (XEXP (x, 0))
6096 && COMPARISON_P (XEXP (x, 1)))
6097 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6098 DOM_CC_X_OR_Y);
6100 /* An operation (on Thumb) where we want to test for a single bit.
6101 This is done by shifting that bit up into the top bit of a
6102 scratch register; we can then branch on the sign bit. */
6103 if (TARGET_THUMB
6104 && GET_MODE (x) == SImode
6105 && (op == EQ || op == NE)
6106 && (GET_CODE (x) == ZERO_EXTRACT))
6107 return CC_Nmode;
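  /* For example, to test bit 3 of a register, Thumb code can shift it left
     by 28 so that it lands in bit 31 of the scratch, then branch with
     "bmi"/"bpl"; hence only the N flag of the result needs to be valid.  */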
6109   /* For an operation that sets the condition codes as a side-effect,
6110      the V flag is not set correctly, so we can only use comparisons where
6111 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6112 instead.) */
6113 if (GET_MODE (x) == SImode
6114 && y == const0_rtx
6115 && (op == EQ || op == NE || op == LT || op == GE)
6116 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6117 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6118 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6119 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6120 || GET_CODE (x) == LSHIFTRT
6121 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6122 || GET_CODE (x) == ROTATERT
6123 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6124 return CC_NOOVmode;
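  /* For example, "ands r0, r1, r2" sets N and Z from the result but leaves
     nothing meaningful in V; EQ and NE need only Z, while LT and GE against
     zero become the "mi" and "pl" tests of N mentioned above.  */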
6126 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6127 return CC_Zmode;
6129 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6130 && GET_CODE (x) == PLUS
6131 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6132 return CC_Cmode;
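  /* This matches the usual carry-detection idiom: for unsigned a and b,
     (a + b) < b holds exactly when the addition wrapped round, so only
     the C flag of the addition is needed.  */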
6134 return CCmode;
6137 /* X and Y are two things to compare using CODE. Emit the compare insn and
6138 return the rtx for register 0 in the proper mode. FP means this is a
6139 floating point compare: I don't think that it is needed on the arm. */
6140 rtx
6141 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6143 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6144 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6146 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6147 gen_rtx_COMPARE (mode, x, y)));
6149 return cc_reg;
6152 /* Generate a sequence of insns that will generate the correct return
6153 address mask depending on the physical architecture that the program
6154 is running on. */
6155 rtx
6156 arm_gen_return_addr_mask (void)
6158 rtx reg = gen_reg_rtx (Pmode);
6160 emit_insn (gen_return_addr_mask (reg));
6161 return reg;
6164 void
6165 arm_reload_in_hi (rtx *operands)
6167 rtx ref = operands[1];
6168 rtx base, scratch;
6169 HOST_WIDE_INT offset = 0;
6171 if (GET_CODE (ref) == SUBREG)
6173 offset = SUBREG_BYTE (ref);
6174 ref = SUBREG_REG (ref);
6177 if (GET_CODE (ref) == REG)
6179 /* We have a pseudo which has been spilt onto the stack; there
6180 are two cases here: the first where there is a simple
6181 stack-slot replacement and a second where the stack-slot is
6182 out of range, or is used as a subreg. */
6183 if (reg_equiv_mem[REGNO (ref)])
6185 ref = reg_equiv_mem[REGNO (ref)];
6186 base = find_replacement (&XEXP (ref, 0));
6188 else
6189 /* The slot is out of range, or was dressed up in a SUBREG. */
6190 base = reg_equiv_address[REGNO (ref)];
6192 else
6193 base = find_replacement (&XEXP (ref, 0));
6195 /* Handle the case where the address is too complex to be offset by 1. */
6196 if (GET_CODE (base) == MINUS
6197 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6199 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6201 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6202 base = base_plus;
6204 else if (GET_CODE (base) == PLUS)
6206 /* The addend must be CONST_INT, or we would have dealt with it above. */
6207 HOST_WIDE_INT hi, lo;
6209 offset += INTVAL (XEXP (base, 1));
6210 base = XEXP (base, 0);
6212 /* Rework the address into a legal sequence of insns. */
6213 /* Valid range for lo is -4095 -> 4095 */
6214 lo = (offset >= 0
6215 ? (offset & 0xfff)
6216 : -((-offset) & 0xfff));
6218       /* Corner case: if lo is the max offset, then we would be out of range
6219 once we have added the additional 1 below, so bump the msb into the
6220 pre-loading insn(s). */
6221 if (lo == 4095)
6222 lo &= 0x7ff;
6224 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6225 ^ (HOST_WIDE_INT) 0x80000000)
6226 - (HOST_WIDE_INT) 0x80000000);
6228 if (hi + lo != offset)
6229 abort ();
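      /* Worked example of the split above: offset 0x12345 gives lo = 0x345
	 and hi = 0x12000; offset -5000 gives lo = -904 and hi = -4096.  In
	 both cases hi + lo == offset and lo stays within the +/-4095
	 ldr/str immediate range.  */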
6231 if (hi != 0)
6233 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6235 /* Get the base address; addsi3 knows how to handle constants
6236 that require more than one insn. */
6237 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6238 base = base_plus;
6239 offset = lo;
6243 /* Operands[2] may overlap operands[0] (though it won't overlap
6244      operands[1]); that's why we asked for a DImode reg -- so we can
6245 use the bit that does not overlap. */
6246 if (REGNO (operands[2]) == REGNO (operands[0]))
6247 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6248 else
6249 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6251 emit_insn (gen_zero_extendqisi2 (scratch,
6252 gen_rtx_MEM (QImode,
6253 plus_constant (base,
6254 offset))));
6255 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6256 gen_rtx_MEM (QImode,
6257 plus_constant (base,
6258 offset + 1))));
6259 if (!BYTES_BIG_ENDIAN)
6260 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6261 gen_rtx_IOR (SImode,
6262 gen_rtx_ASHIFT
6263 (SImode,
6264 gen_rtx_SUBREG (SImode, operands[0], 0),
6265 GEN_INT (8)),
6266 scratch)));
6267 else
6268 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6269 gen_rtx_IOR (SImode,
6270 gen_rtx_ASHIFT (SImode, scratch,
6271 GEN_INT (8)),
6272 gen_rtx_SUBREG (SImode, operands[0],
6273 0))));
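/* The two byte loads plus the IOR above amount to the following sketch,
   where b0 is the byte at the lower address and b1 the byte above it:

     value = BYTES_BIG_ENDIAN ? (b0 << 8) | b1
			      : (b1 << 8) | b0;  */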
6276 /* Handle storing a half-word to memory during reload by synthesizing it as two
6277 byte stores. Take care not to clobber the input values until after we
6278 have moved them somewhere safe. This code assumes that if the DImode
6279 scratch in operands[2] overlaps either the input value or output address
6280 in some way, then that value must die in this insn (we absolutely need
6281 two scratch registers for some corner cases). */
6282 void
6283 arm_reload_out_hi (rtx *operands)
6285 rtx ref = operands[0];
6286 rtx outval = operands[1];
6287 rtx base, scratch;
6288 HOST_WIDE_INT offset = 0;
6290 if (GET_CODE (ref) == SUBREG)
6292 offset = SUBREG_BYTE (ref);
6293 ref = SUBREG_REG (ref);
6296 if (GET_CODE (ref) == REG)
6298 /* We have a pseudo which has been spilt onto the stack; there
6299 are two cases here: the first where there is a simple
6300 stack-slot replacement and a second where the stack-slot is
6301 out of range, or is used as a subreg. */
6302 if (reg_equiv_mem[REGNO (ref)])
6304 ref = reg_equiv_mem[REGNO (ref)];
6305 base = find_replacement (&XEXP (ref, 0));
6307 else
6308 /* The slot is out of range, or was dressed up in a SUBREG. */
6309 base = reg_equiv_address[REGNO (ref)];
6311 else
6312 base = find_replacement (&XEXP (ref, 0));
6314 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6316 /* Handle the case where the address is too complex to be offset by 1. */
6317 if (GET_CODE (base) == MINUS
6318 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6320 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6322 /* Be careful not to destroy OUTVAL. */
6323 if (reg_overlap_mentioned_p (base_plus, outval))
6325 	  /* Updating base_plus might destroy outval; see if we can
6326 swap the scratch and base_plus. */
6327 if (!reg_overlap_mentioned_p (scratch, outval))
6329 rtx tmp = scratch;
6330 scratch = base_plus;
6331 base_plus = tmp;
6333 else
6335 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6337 	      /* Be conservative and copy OUTVAL into the scratch now;
6338 this should only be necessary if outval is a subreg
6339 of something larger than a word. */
6340 /* XXX Might this clobber base? I can't see how it can,
6341 since scratch is known to overlap with OUTVAL, and
6342 must be wider than a word. */
6343 emit_insn (gen_movhi (scratch_hi, outval));
6344 outval = scratch_hi;
6348 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6349 base = base_plus;
6351 else if (GET_CODE (base) == PLUS)
6353 /* The addend must be CONST_INT, or we would have dealt with it above. */
6354 HOST_WIDE_INT hi, lo;
6356 offset += INTVAL (XEXP (base, 1));
6357 base = XEXP (base, 0);
6359 /* Rework the address into a legal sequence of insns. */
6360 /* Valid range for lo is -4095 -> 4095 */
6361 lo = (offset >= 0
6362 ? (offset & 0xfff)
6363 : -((-offset) & 0xfff));
6365       /* Corner case: if lo is the max offset, then we would be out of range
6366 once we have added the additional 1 below, so bump the msb into the
6367 pre-loading insn(s). */
6368 if (lo == 4095)
6369 lo &= 0x7ff;
6371 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6372 ^ (HOST_WIDE_INT) 0x80000000)
6373 - (HOST_WIDE_INT) 0x80000000);
6375 if (hi + lo != offset)
6376 abort ();
6378 if (hi != 0)
6380 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6382 /* Be careful not to destroy OUTVAL. */
6383 if (reg_overlap_mentioned_p (base_plus, outval))
6385 	      /* Updating base_plus might destroy outval; see if we
6386 can swap the scratch and base_plus. */
6387 if (!reg_overlap_mentioned_p (scratch, outval))
6389 rtx tmp = scratch;
6390 scratch = base_plus;
6391 base_plus = tmp;
6393 else
6395 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6397 		  /* Be conservative and copy outval into scratch now;
6398 this should only be necessary if outval is a
6399 subreg of something larger than a word. */
6400 /* XXX Might this clobber base? I can't see how it
6401 can, since scratch is known to overlap with
6402 outval. */
6403 emit_insn (gen_movhi (scratch_hi, outval));
6404 outval = scratch_hi;
6408 /* Get the base address; addsi3 knows how to handle constants
6409 that require more than one insn. */
6410 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6411 base = base_plus;
6412 offset = lo;
6416 if (BYTES_BIG_ENDIAN)
6418 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6419 plus_constant (base, offset + 1)),
6420 gen_lowpart (QImode, outval)));
6421 emit_insn (gen_lshrsi3 (scratch,
6422 gen_rtx_SUBREG (SImode, outval, 0),
6423 GEN_INT (8)));
6424 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6425 gen_lowpart (QImode, scratch)));
6427 else
6429 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6430 gen_lowpart (QImode, outval)));
6431 emit_insn (gen_lshrsi3 (scratch,
6432 gen_rtx_SUBREG (SImode, outval, 0),
6433 GEN_INT (8)));
6434 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6435 plus_constant (base, offset + 1)),
6436 gen_lowpart (QImode, scratch)));
6440 /* Print a symbolic form of X to the debug file, F. */
6441 static void
6442 arm_print_value (FILE *f, rtx x)
6444 switch (GET_CODE (x))
6446 case CONST_INT:
6447 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6448 return;
6450 case CONST_DOUBLE:
6451 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6452 return;
6454 case CONST_VECTOR:
6456 int i;
6458 fprintf (f, "<");
6459 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6461 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6462 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6463 fputc (',', f);
6465 fprintf (f, ">");
6467 return;
6469 case CONST_STRING:
6470 fprintf (f, "\"%s\"", XSTR (x, 0));
6471 return;
6473 case SYMBOL_REF:
6474 fprintf (f, "`%s'", XSTR (x, 0));
6475 return;
6477 case LABEL_REF:
6478 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6479 return;
6481 case CONST:
6482 arm_print_value (f, XEXP (x, 0));
6483 return;
6485 case PLUS:
6486 arm_print_value (f, XEXP (x, 0));
6487 fprintf (f, "+");
6488 arm_print_value (f, XEXP (x, 1));
6489 return;
6491 case PC:
6492 fprintf (f, "pc");
6493 return;
6495 default:
6496 fprintf (f, "????");
6497 return;
6501 /* Routines for manipulation of the constant pool. */
6503 /* Arm instructions cannot load a large constant directly into a
6504 register; they have to come from a pc relative load. The constant
6505 must therefore be placed in the addressable range of the pc
6506 relative load. Depending on the precise pc relative load
6507 instruction the range is somewhere between 256 bytes and 4k. This
6508 means that we often have to dump a constant inside a function, and
6509 generate code to branch around it.
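   For instance, an ARM-state ldr has a 12-bit immediate offset and so
   can reach roughly 4k either side of the pc, while ldrh has only an
   8-bit one, which is where the 256 byte figure above comes from.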
6511 It is important to minimize this, since the branches will slow
6512 things down and make the code larger.
6514 Normally we can hide the table after an existing unconditional
6515 branch so that there is no interruption of the flow, but in the
6516 worst case the code looks like this:
6518 ldr rn, L1
6520 b L2
6521 align
6522 L1: .long value
6526 ldr rn, L3
6528 b L4
6529 align
6530 L3: .long value
6534 We fix this by performing a scan after scheduling, which notices
6535 which instructions need to have their operands fetched from the
6536 constant table and builds the table.
6538 The algorithm starts by building a table of all the constants that
6539 need fixing up and all the natural barriers in the function (places
6540 where a constant table can be dropped without breaking the flow).
6541 For each fixup we note how far the pc-relative replacement will be
6542 able to reach and the offset of the instruction into the function.
6544 Having built the table we then group the fixes together to form
6545 tables that are as large as possible (subject to addressing
6546 constraints) and emit each table of constants after the last
6547 barrier that is within range of all the instructions in the group.
6548 If a group does not contain a barrier, then we forcibly create one
6549 by inserting a jump instruction into the flow. Once the table has
6550 been inserted, the insns are then modified to reference the
6551 relevant entry in the pool.
6553 Possible enhancements to the algorithm (not implemented) are:
6555 1) For some processors and object formats, there may be benefit in
6556 aligning the pools to the start of cache lines; this alignment
6557 would need to be taken into account when calculating addressability
6558 of a pool. */
6560 /* These typedefs are located at the start of this file, so that
6561 they can be used in the prototypes there. This comment is to
6562 remind readers of that fact so that the following structures
6563 can be understood more easily.
6565 typedef struct minipool_node Mnode;
6566 typedef struct minipool_fixup Mfix; */
6568 struct minipool_node
6570 /* Doubly linked chain of entries. */
6571 Mnode * next;
6572 Mnode * prev;
6573   /* The maximum offset into the code at which this entry can be placed.  While
6574 pushing fixes for forward references, all entries are sorted in order
6575 of increasing max_address. */
6576 HOST_WIDE_INT max_address;
6577 /* Similarly for an entry inserted for a backwards ref. */
6578 HOST_WIDE_INT min_address;
6579 /* The number of fixes referencing this entry. This can become zero
6580 if we "unpush" an entry. In this case we ignore the entry when we
6581 come to emit the code. */
6582 int refcount;
6583 /* The offset from the start of the minipool. */
6584 HOST_WIDE_INT offset;
6585   /* The value in the table.  */
6586 rtx value;
6587 /* The mode of value. */
6588 enum machine_mode mode;
6589 /* The size of the value. With iWMMXt enabled
6590      sizes > 4 also imply an alignment of 8 bytes.  */
6591 int fix_size;
6594 struct minipool_fixup
6596 Mfix * next;
6597 rtx insn;
6598 HOST_WIDE_INT address;
6599 rtx * loc;
6600 enum machine_mode mode;
6601 int fix_size;
6602 rtx value;
6603 Mnode * minipool;
6604 HOST_WIDE_INT forwards;
6605 HOST_WIDE_INT backwards;
6608 /* Fixes less than a word need padding out to a word boundary. */
6609 #define MINIPOOL_FIX_SIZE(mode) \
6610 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
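/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   are both 4, while MINIPOOL_FIX_SIZE (DImode) is 8.  */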
6612 static Mnode * minipool_vector_head;
6613 static Mnode * minipool_vector_tail;
6614 static rtx minipool_vector_label;
6616 /* The linked list of all minipool fixes required for this function. */
6617 Mfix * minipool_fix_head;
6618 Mfix * minipool_fix_tail;
6619 /* The fix entry for the current minipool, once it has been placed. */
6620 Mfix * minipool_barrier;
6622 /* Determines if INSN is the start of a jump table. Returns the end
6623 of the TABLE or NULL_RTX. */
6624 static rtx
6625 is_jump_table (rtx insn)
6627 rtx table;
6629 if (GET_CODE (insn) == JUMP_INSN
6630 && JUMP_LABEL (insn) != NULL
6631 && ((table = next_real_insn (JUMP_LABEL (insn)))
6632 == next_real_insn (insn))
6633 && table != NULL
6634 && GET_CODE (table) == JUMP_INSN
6635 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6636 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6637 return table;
6639 return NULL_RTX;
6642 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6643 #define JUMP_TABLES_IN_TEXT_SECTION 0
6644 #endif
6646 static HOST_WIDE_INT
6647 get_jump_table_size (rtx insn)
6649   /* ADDR_VECs only take room if read-only data goes into the text
6650 section. */
6651 if (JUMP_TABLES_IN_TEXT_SECTION
6652 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6653 || 1
6654 #endif
6657 rtx body = PATTERN (insn);
6658 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6660 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6663 return 0;
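/* For example, an ADDR_DIFF_VEC in SImode with ten entries occupies
   4 * 10 = 40 bytes when jump tables are placed in the text section.  */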
6666 /* Move a minipool fix MP from its current location to before MAX_MP.
6667 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6668 constraints may need updating. */
6669 static Mnode *
6670 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6671 HOST_WIDE_INT max_address)
6673 /* This should never be true and the code below assumes these are
6674 different. */
6675 if (mp == max_mp)
6676 abort ();
6678 if (max_mp == NULL)
6680 if (max_address < mp->max_address)
6681 mp->max_address = max_address;
6683 else
6685 if (max_address > max_mp->max_address - mp->fix_size)
6686 mp->max_address = max_mp->max_address - mp->fix_size;
6687 else
6688 mp->max_address = max_address;
6690 /* Unlink MP from its current position. Since max_mp is non-null,
6691 mp->prev must be non-null. */
6692 mp->prev->next = mp->next;
6693 if (mp->next != NULL)
6694 mp->next->prev = mp->prev;
6695 else
6696 minipool_vector_tail = mp->prev;
6698 /* Re-insert it before MAX_MP. */
6699 mp->next = max_mp;
6700 mp->prev = max_mp->prev;
6701 max_mp->prev = mp;
6703 if (mp->prev != NULL)
6704 mp->prev->next = mp;
6705 else
6706 minipool_vector_head = mp;
6709 /* Save the new entry. */
6710 max_mp = mp;
6712 /* Scan over the preceding entries and adjust their addresses as
6713 required. */
6714 while (mp->prev != NULL
6715 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6717 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6718 mp = mp->prev;
6721 return max_mp;
6724 /* Add a constant to the minipool for a forward reference. Returns the
6725 node added or NULL if the constant will not fit in this pool. */
6726 static Mnode *
6727 add_minipool_forward_ref (Mfix *fix)
6729 /* If set, max_mp is the first pool_entry that has a lower
6730 constraint than the one we are trying to add. */
6731 Mnode * max_mp = NULL;
6732 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6733 Mnode * mp;
6735 /* If this fix's address is greater than the address of the first
6736 entry, then we can't put the fix in this pool. We subtract the
6737 size of the current fix to ensure that if the table is fully
6738      packed we still have enough room to insert this value by shuffling
6739 the other fixes forwards. */
6740 if (minipool_vector_head &&
6741 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6742 return NULL;
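  /* For example, with a 4-byte fix and a head entry whose max_address is
     1000, any fix at address 996 or later is rejected here, leaving enough
     slack to shuffle the existing entries forwards.  */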
6744 /* Scan the pool to see if a constant with the same value has
6745 already been added. While we are doing this, also note the
6746 location where we must insert the constant if it doesn't already
6747 exist. */
6748 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6750 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6751 && fix->mode == mp->mode
6752 && (GET_CODE (fix->value) != CODE_LABEL
6753 || (CODE_LABEL_NUMBER (fix->value)
6754 == CODE_LABEL_NUMBER (mp->value)))
6755 && rtx_equal_p (fix->value, mp->value))
6757 /* More than one fix references this entry. */
6758 mp->refcount++;
6759 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6762 /* Note the insertion point if necessary. */
6763 if (max_mp == NULL
6764 && mp->max_address > max_address)
6765 max_mp = mp;
6767       /* If we are inserting an 8-byte aligned quantity and
6768 we have not already found an insertion point, then
6769 make sure that all such 8-byte aligned quantities are
6770 placed at the start of the pool. */
6771 if (ARM_DOUBLEWORD_ALIGN
6772 && max_mp == NULL
6773 && fix->fix_size == 8
6774 && mp->fix_size != 8)
6776 max_mp = mp;
6777 max_address = mp->max_address;
6781 /* The value is not currently in the minipool, so we need to create
6782 a new entry for it. If MAX_MP is NULL, the entry will be put on
6783 the end of the list since the placement is less constrained than
6784 any existing entry. Otherwise, we insert the new fix before
6785 MAX_MP and, if necessary, adjust the constraints on the other
6786 entries. */
6787 mp = xmalloc (sizeof (* mp));
6788 mp->fix_size = fix->fix_size;
6789 mp->mode = fix->mode;
6790 mp->value = fix->value;
6791 mp->refcount = 1;
6792 /* Not yet required for a backwards ref. */
6793 mp->min_address = -65536;
6795 if (max_mp == NULL)
6797 mp->max_address = max_address;
6798 mp->next = NULL;
6799 mp->prev = minipool_vector_tail;
6801 if (mp->prev == NULL)
6803 minipool_vector_head = mp;
6804 minipool_vector_label = gen_label_rtx ();
6806 else
6807 mp->prev->next = mp;
6809 minipool_vector_tail = mp;
6811 else
6813 if (max_address > max_mp->max_address - mp->fix_size)
6814 mp->max_address = max_mp->max_address - mp->fix_size;
6815 else
6816 mp->max_address = max_address;
6818 mp->next = max_mp;
6819 mp->prev = max_mp->prev;
6820 max_mp->prev = mp;
6821 if (mp->prev != NULL)
6822 mp->prev->next = mp;
6823 else
6824 minipool_vector_head = mp;
6827 /* Save the new entry. */
6828 max_mp = mp;
6830 /* Scan over the preceding entries and adjust their addresses as
6831 required. */
6832 while (mp->prev != NULL
6833 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6835 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6836 mp = mp->prev;
6839 return max_mp;
6842 static Mnode *
6843 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6844 HOST_WIDE_INT min_address)
6846 HOST_WIDE_INT offset;
6848 /* This should never be true, and the code below assumes these are
6849 different. */
6850 if (mp == min_mp)
6851 abort ();
6853 if (min_mp == NULL)
6855 if (min_address > mp->min_address)
6856 mp->min_address = min_address;
6858 else
6860 /* We will adjust this below if it is too loose. */
6861 mp->min_address = min_address;
6863 /* Unlink MP from its current position. Since min_mp is non-null,
6864 mp->next must be non-null. */
6865 mp->next->prev = mp->prev;
6866 if (mp->prev != NULL)
6867 mp->prev->next = mp->next;
6868 else
6869 minipool_vector_head = mp->next;
6871 /* Reinsert it after MIN_MP. */
6872 mp->prev = min_mp;
6873 mp->next = min_mp->next;
6874 min_mp->next = mp;
6875 if (mp->next != NULL)
6876 mp->next->prev = mp;
6877 else
6878 minipool_vector_tail = mp;
6881 min_mp = mp;
6883 offset = 0;
6884 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6886 mp->offset = offset;
6887 if (mp->refcount > 0)
6888 offset += mp->fix_size;
6890 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6891 mp->next->min_address = mp->min_address + mp->fix_size;
6894 return min_mp;
6897 /* Add a constant to the minipool for a backward reference. Returns the
6898 node added or NULL if the constant will not fit in this pool.
6900 Note that the code for insertion for a backwards reference can be
6901 somewhat confusing because the calculated offsets for each fix do
6902 not take into account the size of the pool (which is still under
6903    construction).  */
6904 static Mnode *
6905 add_minipool_backward_ref (Mfix *fix)
6907 /* If set, min_mp is the last pool_entry that has a lower constraint
6908 than the one we are trying to add. */
6909 Mnode *min_mp = NULL;
6910 /* This can be negative, since it is only a constraint. */
6911 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6912 Mnode *mp;
6914 /* If we can't reach the current pool from this insn, or if we can't
6915 insert this entry at the end of the pool without pushing other
6916 fixes out of range, then we don't try. This ensures that we
6917 can't fail later on. */
6918 if (min_address >= minipool_barrier->address
6919 || (minipool_vector_tail->min_address + fix->fix_size
6920 >= minipool_barrier->address))
6921 return NULL;
6923 /* Scan the pool to see if a constant with the same value has
6924 already been added. While we are doing this, also note the
6925 location where we must insert the constant if it doesn't already
6926 exist. */
6927 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6929 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6930 && fix->mode == mp->mode
6931 && (GET_CODE (fix->value) != CODE_LABEL
6932 || (CODE_LABEL_NUMBER (fix->value)
6933 == CODE_LABEL_NUMBER (mp->value)))
6934 && rtx_equal_p (fix->value, mp->value)
6935 /* Check that there is enough slack to move this entry to the
6936 end of the table (this is conservative). */
6937 && (mp->max_address
6938 > (minipool_barrier->address
6939 + minipool_vector_tail->offset
6940 + minipool_vector_tail->fix_size)))
6942 mp->refcount++;
6943 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6946 if (min_mp != NULL)
6947 mp->min_address += fix->fix_size;
6948 else
6950 /* Note the insertion point if necessary. */
6951 if (mp->min_address < min_address)
6953 	  /* For now, we do not allow the insertion of nodes requiring
6954 	     8-byte alignment anywhere but at the start of the pool.  */
6955 if (ARM_DOUBLEWORD_ALIGN
6956 && fix->fix_size == 8 && mp->fix_size != 8)
6957 return NULL;
6958 else
6959 min_mp = mp;
6961 else if (mp->max_address
6962 < minipool_barrier->address + mp->offset + fix->fix_size)
6964 /* Inserting before this entry would push the fix beyond
6965 its maximum address (which can happen if we have
6966 re-located a forwards fix); force the new fix to come
6967 after it. */
6968 min_mp = mp;
6969 min_address = mp->min_address + fix->fix_size;
6971       /* If we are inserting an 8-byte aligned quantity and
6972 we have not already found an insertion point, then
6973 make sure that all such 8-byte aligned quantities are
6974 placed at the start of the pool. */
6975 else if (ARM_DOUBLEWORD_ALIGN
6976 && min_mp == NULL
6977 && fix->fix_size == 8
6978 && mp->fix_size < 8)
6980 min_mp = mp;
6981 min_address = mp->min_address + fix->fix_size;
6986 /* We need to create a new entry. */
6987 mp = xmalloc (sizeof (* mp));
6988 mp->fix_size = fix->fix_size;
6989 mp->mode = fix->mode;
6990 mp->value = fix->value;
6991 mp->refcount = 1;
6992 mp->max_address = minipool_barrier->address + 65536;
6994 mp->min_address = min_address;
6996 if (min_mp == NULL)
6998 mp->prev = NULL;
6999 mp->next = minipool_vector_head;
7001 if (mp->next == NULL)
7003 minipool_vector_tail = mp;
7004 minipool_vector_label = gen_label_rtx ();
7006 else
7007 mp->next->prev = mp;
7009 minipool_vector_head = mp;
7011 else
7013 mp->next = min_mp->next;
7014 mp->prev = min_mp;
7015 min_mp->next = mp;
7017 if (mp->next != NULL)
7018 mp->next->prev = mp;
7019 else
7020 minipool_vector_tail = mp;
7023 /* Save the new entry. */
7024 min_mp = mp;
7026 if (mp->prev)
7027 mp = mp->prev;
7028 else
7029 mp->offset = 0;
7031 /* Scan over the following entries and adjust their offsets. */
7032 while (mp->next != NULL)
7034 if (mp->next->min_address < mp->min_address + mp->fix_size)
7035 mp->next->min_address = mp->min_address + mp->fix_size;
7037 if (mp->refcount)
7038 mp->next->offset = mp->offset + mp->fix_size;
7039 else
7040 mp->next->offset = mp->offset;
7042 mp = mp->next;
7045 return min_mp;
7048 static void
7049 assign_minipool_offsets (Mfix *barrier)
7051 HOST_WIDE_INT offset = 0;
7052 Mnode *mp;
7054 minipool_barrier = barrier;
7056 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7058 mp->offset = offset;
7060 if (mp->refcount > 0)
7061 offset += mp->fix_size;
7065 /* Output the literal table.  */
7066 static void
7067 dump_minipool (rtx scan)
7069 Mnode * mp;
7070 Mnode * nmp;
7071 int align64 = 0;
7073 if (ARM_DOUBLEWORD_ALIGN)
7074 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7075 if (mp->refcount > 0 && mp->fix_size == 8)
7077 align64 = 1;
7078 break;
7081 if (dump_file)
7082 fprintf (dump_file,
7083 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7084 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7086 scan = emit_label_after (gen_label_rtx (), scan);
7087 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7088 scan = emit_label_after (minipool_vector_label, scan);
7090 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7092 if (mp->refcount > 0)
7094 if (dump_file)
7096 fprintf (dump_file,
7097 ";; Offset %u, min %ld, max %ld ",
7098 (unsigned) mp->offset, (unsigned long) mp->min_address,
7099 (unsigned long) mp->max_address);
7100 arm_print_value (dump_file, mp->value);
7101 fputc ('\n', dump_file);
7104 switch (mp->fix_size)
7106 #ifdef HAVE_consttable_1
7107 case 1:
7108 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7109 break;
7111 #endif
7112 #ifdef HAVE_consttable_2
7113 case 2:
7114 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7115 break;
7117 #endif
7118 #ifdef HAVE_consttable_4
7119 case 4:
7120 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7121 break;
7123 #endif
7124 #ifdef HAVE_consttable_8
7125 case 8:
7126 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7127 break;
7129 #endif
7130 default:
7131 abort ();
7132 break;
7136 nmp = mp->next;
7137 free (mp);
7140 minipool_vector_head = minipool_vector_tail = NULL;
7141 scan = emit_insn_after (gen_consttable_end (), scan);
7142 scan = emit_barrier_after (scan);
7145 /* Return the cost of forcibly inserting a barrier after INSN. */
7146 static int
7147 arm_barrier_cost (rtx insn)
7149 /* Basing the location of the pool on the loop depth is preferable,
7150 but at the moment, the basic block information seems to be
7151 corrupt by this stage of the compilation. */
7152 int base_cost = 50;
7153 rtx next = next_nonnote_insn (insn);
7155 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7156 base_cost -= 20;
7158 switch (GET_CODE (insn))
7160 case CODE_LABEL:
7161 /* It will always be better to place the table before the label, rather
7162 than after it. */
7163 return 50;
7165 case INSN:
7166 case CALL_INSN:
7167 return base_cost;
7169 case JUMP_INSN:
7170 return base_cost - 10;
7172 default:
7173 return base_cost + 10;
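/* Putting the numbers together: a JUMP_INSN that is followed by a
   CODE_LABEL scores 50 - 20 - 10 = 20, the cheapest place to break the
   stream, while a barrier placed at a CODE_LABEL itself always costs 50.  */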
7177 /* Find the best place in the insn stream in the range
7178 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7179 Create the barrier by inserting a jump and add a new fix entry for
7180 it. */
7181 static Mfix *
7182 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7184 HOST_WIDE_INT count = 0;
7185 rtx barrier;
7186 rtx from = fix->insn;
7187 rtx selected = from;
7188 int selected_cost;
7189 HOST_WIDE_INT selected_address;
7190 Mfix * new_fix;
7191 HOST_WIDE_INT max_count = max_address - fix->address;
7192 rtx label = gen_label_rtx ();
7194 selected_cost = arm_barrier_cost (from);
7195 selected_address = fix->address;
7197 while (from && count < max_count)
7199 rtx tmp;
7200 int new_cost;
7202 /* This code shouldn't have been called if there was a natural barrier
7203 within range. */
7204 if (GET_CODE (from) == BARRIER)
7205 abort ();
7207 /* Count the length of this insn. */
7208 count += get_attr_length (from);
7210 /* If there is a jump table, add its length. */
7211 tmp = is_jump_table (from);
7212 if (tmp != NULL)
7214 count += get_jump_table_size (tmp);
7216 /* Jump tables aren't in a basic block, so base the cost on
7217 the dispatch insn. If we select this location, we will
7218 still put the pool after the table. */
7219 new_cost = arm_barrier_cost (from);
7221 if (count < max_count && new_cost <= selected_cost)
7223 selected = tmp;
7224 selected_cost = new_cost;
7225 selected_address = fix->address + count;
7228 /* Continue after the dispatch table. */
7229 from = NEXT_INSN (tmp);
7230 continue;
7233 new_cost = arm_barrier_cost (from);
7235 if (count < max_count && new_cost <= selected_cost)
7237 selected = from;
7238 selected_cost = new_cost;
7239 selected_address = fix->address + count;
7242 from = NEXT_INSN (from);
7245 /* Create a new JUMP_INSN that branches around a barrier. */
7246 from = emit_jump_insn_after (gen_jump (label), selected);
7247 JUMP_LABEL (from) = label;
7248 barrier = emit_barrier_after (from);
7249 emit_label_after (label, barrier);
7251 /* Create a minipool barrier entry for the new barrier. */
7252 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7253 new_fix->insn = barrier;
7254 new_fix->address = selected_address;
7255 new_fix->next = fix->next;
7256 fix->next = new_fix;
7258 return new_fix;
7261 /* Record that there is a natural barrier in the insn stream at
7262 ADDRESS. */
7263 static void
7264 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7266 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7268 fix->insn = insn;
7269 fix->address = address;
7271 fix->next = NULL;
7272 if (minipool_fix_head != NULL)
7273 minipool_fix_tail->next = fix;
7274 else
7275 minipool_fix_head = fix;
7277 minipool_fix_tail = fix;
7280 /* Record INSN, which will need fixing up to load a value from the
7281    minipool.  ADDRESS is the offset of the insn from the start of the
7282 function; LOC is a pointer to the part of the insn which requires
7283 fixing; VALUE is the constant that must be loaded, which is of type
7284 MODE. */
7285 static void
7286 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7287 enum machine_mode mode, rtx value)
7289 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7291 #ifdef AOF_ASSEMBLER
7292 /* PIC symbol references need to be converted into offsets into the
7293 based area. */
7294 /* XXX This shouldn't be done here. */
7295 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7296 value = aof_pic_entry (value);
7297 #endif /* AOF_ASSEMBLER */
7299 fix->insn = insn;
7300 fix->address = address;
7301 fix->loc = loc;
7302 fix->mode = mode;
7303 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7304 fix->value = value;
7305 fix->forwards = get_attr_pool_range (insn);
7306 fix->backwards = get_attr_neg_pool_range (insn);
7307 fix->minipool = NULL;
7309 /* If an insn doesn't have a range defined for it, then it isn't
7310 expecting to be reworked by this code. Better to abort now than
7311 to generate duff assembly code. */
7312 if (fix->forwards == 0 && fix->backwards == 0)
7313 abort ();
7315 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7316 So there might be an empty word before the start of the pool.
7317 Hence we reduce the forward range by 4 to allow for this
7318 possibility. */
7319 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7320 fix->forwards -= 4;
7322 if (dump_file)
7324 fprintf (dump_file,
7325 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7326 GET_MODE_NAME (mode),
7327 INSN_UID (insn), (unsigned long) address,
7328 -1 * (long)fix->backwards, (long)fix->forwards);
7329 arm_print_value (dump_file, fix->value);
7330 fprintf (dump_file, "\n");
7333 /* Add it to the chain of fixes. */
7334 fix->next = NULL;
7336 if (minipool_fix_head != NULL)
7337 minipool_fix_tail->next = fix;
7338 else
7339 minipool_fix_head = fix;
7341 minipool_fix_tail = fix;
7344 /* Return the cost of synthesizing the const_double VAL inline.
7345 Returns the number of insns needed, or 99 if we don't know how to
7346 do it. */
7347 int
7348 arm_const_double_inline_cost (rtx val)
7350 long parts[2];
7352 if (GET_MODE (val) == DFmode)
7354 REAL_VALUE_TYPE r;
7355 if (!TARGET_SOFT_FLOAT)
7356 return 99;
7357 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7358 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7360 else if (GET_MODE (val) != VOIDmode)
7361 return 99;
7362 else
7364 parts[0] = CONST_DOUBLE_LOW (val);
7365 parts[1] = CONST_DOUBLE_HIGH (val);
7368 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7369 NULL_RTX, NULL_RTX, 0, 0)
7370 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7371 NULL_RTX, NULL_RTX, 0, 0));
7374 /* Determine if a CONST_DOUBLE should be pushed to the minipool.  */
7375 static bool
7376 const_double_needs_minipool (rtx val)
7378   /* Thumb only knows how to load a CONST_DOUBLE from memory at the moment.  */
7379 if (TARGET_THUMB)
7380 return true;
7382   /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7383      a few ALU insns directly.  On balance, the optimum is likely to be around
7384      3 insns, except when there are no load delay slots, where it should be 4.
7385      When optimizing for size, a limit of 3 allows saving at least one word,
7386      except for cases where a single minipool entry could be shared more than
7387      2 times, which is rather unlikely to outweigh the overall savings.  */
7388 return (arm_const_double_inline_cost (val)
7389 > ((optimize_size || arm_ld_sched) ? 3 : 4));
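/* For example, a VOIDmode CONST_DOUBLE whose low and high words are each
   a single valid immediate (say 1 and 5) costs 1 + 1 = 2 insns, which is
   within the limit of 3, so it is synthesized inline rather than pooled.  */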
7392 /* Scan INSN and note any of its operands that need fixing.
7393 If DO_PUSHES is false we do not actually push any of the fixups
7394    needed.  The function returns TRUE if any fixups were needed/pushed.
7395 This is used by arm_memory_load_p() which needs to know about loads
7396 of constants that will be converted into minipool loads. */
7397 static bool
7398 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7400 bool result = false;
7401 int opno;
7403 extract_insn (insn);
7405 if (!constrain_operands (1))
7406 fatal_insn_not_found (insn);
7408 if (recog_data.n_alternatives == 0)
7409 return false;
7411 /* Fill in recog_op_alt with information about the constraints of this insn. */
7412 preprocess_constraints ();
7414 for (opno = 0; opno < recog_data.n_operands; opno++)
7416 /* Things we need to fix can only occur in inputs. */
7417 if (recog_data.operand_type[opno] != OP_IN)
7418 continue;
7420 /* If this alternative is a memory reference, then any mention
7421 of constants in this alternative is really to fool reload
7422 into allowing us to accept one there. We need to fix them up
7423 now so that we output the right code. */
7424 if (recog_op_alt[opno][which_alternative].memory_ok)
7426 rtx op = recog_data.operand[opno];
7428 if (CONSTANT_P (op)
7429 && (GET_CODE (op) != CONST_DOUBLE
7430 || const_double_needs_minipool (op)))
7432 if (do_pushes)
7433 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7434 recog_data.operand_mode[opno], op);
7435 result = true;
7437 else if (GET_CODE (op) == MEM
7438 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7439 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7441 if (do_pushes)
7443 rtx cop = avoid_constant_pool_reference (op);
7445 /* Casting the address of something to a mode narrower
7446 than a word can cause avoid_constant_pool_reference()
7447 to return the pool reference itself. That's no good to
7448 	       us here.  Let's just hope that we can use the
7449 constant pool value directly. */
7450 if (op == cop)
7451 cop = get_pool_constant (XEXP (op, 0));
7453 push_minipool_fix (insn, address,
7454 recog_data.operand_loc[opno],
7455 recog_data.operand_mode[opno], cop);
7458 result = true;
7463 return result;
7466 /* GCC puts the pool in the wrong place for ARM, since we can only
7467 load addresses a limited distance around the pc. We do some
7468 special munging to move the constant pool values to the correct
7469 point in the code. */
7470 static void
7471 arm_reorg (void)
7473 rtx insn;
7474 HOST_WIDE_INT address = 0;
7475 Mfix * fix;
7477 minipool_fix_head = minipool_fix_tail = NULL;
7479 /* The first insn must always be a note, or the code below won't
7480 scan it properly. */
7481 insn = get_insns ();
7482 if (GET_CODE (insn) != NOTE)
7483 abort ();
7485 /* Scan all the insns and record the operands that will need fixing. */
7486 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7488 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7489 && (arm_cirrus_insn_p (insn)
7490 || GET_CODE (insn) == JUMP_INSN
7491 || arm_memory_load_p (insn)))
7492 cirrus_reorg (insn);
7494 if (GET_CODE (insn) == BARRIER)
7495 push_minipool_barrier (insn, address);
7496 else if (INSN_P (insn))
7498 rtx table;
7500 note_invalid_constants (insn, address, true);
7501 address += get_attr_length (insn);
7503 /* If the insn is a vector jump, add the size of the table
7504 and skip the table. */
7505 if ((table = is_jump_table (insn)) != NULL)
7507 address += get_jump_table_size (table);
7508 insn = table;
7513 fix = minipool_fix_head;
7515 /* Now scan the fixups and perform the required changes. */
7516 while (fix)
7518 Mfix * ftmp;
7519 Mfix * fdel;
7520 Mfix * last_added_fix;
7521 Mfix * last_barrier = NULL;
7522 Mfix * this_fix;
7524 /* Skip any further barriers before the next fix. */
7525 while (fix && GET_CODE (fix->insn) == BARRIER)
7526 fix = fix->next;
7528 /* No more fixes. */
7529 if (fix == NULL)
7530 break;
7532 last_added_fix = NULL;
7534 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7536 if (GET_CODE (ftmp->insn) == BARRIER)
7538 if (ftmp->address >= minipool_vector_head->max_address)
7539 break;
7541 last_barrier = ftmp;
7543 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7544 break;
7546 last_added_fix = ftmp; /* Keep track of the last fix added. */
7549 /* If we found a barrier, drop back to that; any fixes that we
7550 could have reached but come after the barrier will now go in
7551 the next mini-pool. */
7552 if (last_barrier != NULL)
7554 /* Reduce the refcount for those fixes that won't go into this
7555 pool after all. */
7556 for (fdel = last_barrier->next;
7557 fdel && fdel != ftmp;
7558 fdel = fdel->next)
7560 fdel->minipool->refcount--;
7561 fdel->minipool = NULL;
7564 ftmp = last_barrier;
7566 else
7568 	  /* ftmp is the first fix that we can't fit into this pool and
7569 	     there are no natural barriers that we could use.  Insert a
7570 new barrier in the code somewhere between the previous
7571 fix and this one, and arrange to jump around it. */
7572 HOST_WIDE_INT max_address;
7574 /* The last item on the list of fixes must be a barrier, so
7575 we can never run off the end of the list of fixes without
7576 last_barrier being set. */
7577 if (ftmp == NULL)
7578 abort ();
7580 max_address = minipool_vector_head->max_address;
7581 /* Check that there isn't another fix that is in range that
7582 we couldn't fit into this pool because the pool was
7583 already too large: we need to put the pool before such an
7584 instruction. */
7585 if (ftmp->address < max_address)
7586 max_address = ftmp->address;
7588 last_barrier = create_fix_barrier (last_added_fix, max_address);
7591 assign_minipool_offsets (last_barrier);
7593 while (ftmp)
7595 if (GET_CODE (ftmp->insn) != BARRIER
7596 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7597 == NULL))
7598 break;
7600 ftmp = ftmp->next;
7603 /* Scan over the fixes we have identified for this pool, fixing them
7604 up and adding the constants to the pool itself. */
7605 for (this_fix = fix; this_fix && ftmp != this_fix;
7606 this_fix = this_fix->next)
7607 if (GET_CODE (this_fix->insn) != BARRIER)
7609 rtx addr
7610 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7611 minipool_vector_label),
7612 this_fix->minipool->offset);
7613 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7616 dump_minipool (last_barrier->insn);
7617 fix = ftmp;
7620 /* From now on we must synthesize any constants that we can't handle
7621 directly. This can happen if the RTL gets split during final
7622 instruction generation. */
7623 after_arm_reorg = 1;
7625 /* Free the minipool memory. */
7626 obstack_free (&minipool_obstack, minipool_startobj);
7629 /* Routines to output assembly language. */
7631 /* If the rtx is a valid FPA immediate constant, return the string of the
7632    number.  In this way we can ensure that valid double constants are
7633    generated even when cross-compiling.  */
7634 const char *
7635 fp_immediate_constant (rtx x)
7637 REAL_VALUE_TYPE r;
7638 int i;
7640 if (!fp_consts_inited)
7641 init_fp_table ();
7643 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7644 for (i = 0; i < 8; i++)
7645 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7646 return strings_fp[i];
7648 abort ();
7651 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7652 static const char *
7653 fp_const_from_val (REAL_VALUE_TYPE *r)
7655 int i;
7657 if (!fp_consts_inited)
7658 init_fp_table ();
7660 for (i = 0; i < 8; i++)
7661 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7662 return strings_fp[i];
7664 abort ();
7667 /* Output the operands of a LDM/STM instruction to STREAM.
7668 MASK is the ARM register set mask of which only bits 0-15 are important.
7669    REG is the base register, either the frame pointer or the stack pointer.
7670 INSTR is the possibly suffixed load or store instruction. */
7671 static void
7672 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7674 int i;
7675 int not_first = FALSE;
7677 fputc ('\t', stream);
7678 asm_fprintf (stream, instr, reg);
7679 fputs (", {", stream);
7681 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7682 if (mask & (1 << i))
7684 if (not_first)
7685 fprintf (stream, ", ");
7687 asm_fprintf (stream, "%r", i);
7688 not_first = TRUE;
7691 fprintf (stream, "}\n");
7695 /* Output an FLDMX instruction to STREAM.
7696    BASE is the register containing the address.
7697 REG and COUNT specify the register range.
7698 Extra registers may be added to avoid hardware bugs. */
7700 static void
7701 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7703 int i;
7705 /* Workaround ARM10 VFPr1 bug. */
7706 if (count == 2 && !arm_arch6)
7708 if (reg == 15)
7709 reg--;
7710 count++;
7713 fputc ('\t', stream);
7714 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7716 for (i = reg; i < reg + count; i++)
7718 if (i > reg)
7719 fputs (", ", stream);
7720 asm_fprintf (stream, "d%d", i);
7722 fputs ("}\n", stream);
7727 /* Output the assembly for a store multiple. */
7729 const char *
7730 vfp_output_fstmx (rtx * operands)
7732 char pattern[100];
7733 int p;
7734 int base;
7735 int i;
7737 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7738 p = strlen (pattern);
7740 if (GET_CODE (operands[1]) != REG)
7741 abort ();
7743 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7744 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7746 p += sprintf (&pattern[p], ", d%d", base + i);
7748 strcpy (&pattern[p], "}");
7750 output_asm_insn (pattern, operands);
7751 return "";
7755 /* Emit RTL to save a block of VFP register pairs to the stack.  Returns the
7756 number of bytes pushed. */
7758 static int
7759 vfp_emit_fstmx (int base_reg, int count)
7761 rtx par;
7762 rtx dwarf;
7763 rtx tmp, reg;
7764 int i;
7766 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
7767 register pairs are stored by a store multiple insn. We avoid this
7768 by pushing an extra pair. */
7769 if (count == 2 && !arm_arch6)
7771 if (base_reg == LAST_VFP_REGNUM - 3)
7772 base_reg -= 2;
7773 count++;
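  /* That is, a request to store exactly two register pairs is widened to
     three, stepping the base back one pair when the two requested pairs
     already sit at the top of the VFP register bank.  */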
7776 /* ??? The frame layout is implementation defined. We describe
7777      standard format 1 (equivalent to an FSTMD insn and an unused pad word).
7778 We really need some way of representing the whole block so that the
7779 unwinder can figure it out at runtime. */
7780 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7781 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7783 reg = gen_rtx_REG (DFmode, base_reg);
7784 base_reg += 2;
7786 XVECEXP (par, 0, 0)
7787 = gen_rtx_SET (VOIDmode,
7788 gen_rtx_MEM (BLKmode,
7789 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7790 gen_rtx_UNSPEC (BLKmode,
7791 gen_rtvec (1, reg),
7792 UNSPEC_PUSH_MULT));
7794 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7795 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7796 GEN_INT (-(count * 8 + 4))));
7797 RTX_FRAME_RELATED_P (tmp) = 1;
7798 XVECEXP (dwarf, 0, 0) = tmp;
7800 tmp = gen_rtx_SET (VOIDmode,
7801 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7802 reg);
7803 RTX_FRAME_RELATED_P (tmp) = 1;
7804 XVECEXP (dwarf, 0, 1) = tmp;
7806 for (i = 1; i < count; i++)
7808 reg = gen_rtx_REG (DFmode, base_reg);
7809 base_reg += 2;
7810 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7812 tmp = gen_rtx_SET (VOIDmode,
7813 gen_rtx_MEM (DFmode,
7814 gen_rtx_PLUS (SImode,
7815 stack_pointer_rtx,
7816 GEN_INT (i * 8))),
7817 reg);
7818 RTX_FRAME_RELATED_P (tmp) = 1;
7819 XVECEXP (dwarf, 0, i + 1) = tmp;
7822 par = emit_insn (par);
7823 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7824 REG_NOTES (par));
7825 RTX_FRAME_RELATED_P (par) = 1;
7827 return count * 8 + 4;
7831 /* Output a 'call' insn. */
7832 const char *
7833 output_call (rtx *operands)
7835 if (arm_arch5)
7836 abort (); /* Patterns should call blx <reg> directly. */
7838 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7839 if (REGNO (operands[0]) == LR_REGNUM)
7841 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7842 output_asm_insn ("mov%?\t%0, %|lr", operands);
7845 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7847 if (TARGET_INTERWORK || arm_arch4t)
7848 output_asm_insn ("bx%?\t%0", operands);
7849 else
7850 output_asm_insn ("mov%?\t%|pc, %0", operands);
7852 return "";
7855 /* Output a 'call' insn that is a reference in memory. */
7856 const char *
7857 output_call_mem (rtx *operands)
7859 if (TARGET_INTERWORK && !arm_arch5)
7861 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7862 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7863 output_asm_insn ("bx%?\t%|ip", operands);
7865 else if (regno_use_in (LR_REGNUM, operands[0]))
7867 /* LR is used in the memory address. We load the address in the
7868 first instruction. It's safe to use IP as the target of the
7869 load since the call will kill it anyway. */
7870 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7871 if (arm_arch5)
7872 output_asm_insn ("blx%?\t%|ip", operands);
7873 else
7875 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7876 if (arm_arch4t)
7877 output_asm_insn ("bx%?\t%|ip", operands);
7878 else
7879 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7882 else
7884 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7885 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7888 return "";
7892 /* Output a move from arm registers to an fpa register.
7893    OPERANDS[0] is an fpa register.
7894    OPERANDS[1] is the first of the three arm registers holding the value.  */
7895 const char *
7896 output_mov_long_double_fpa_from_arm (rtx *operands)
7898 int arm_reg0 = REGNO (operands[1]);
7899 rtx ops[3];
7901 if (arm_reg0 == IP_REGNUM)
7902 abort ();
7904 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7905 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7906 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7908 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7909 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7911 return "";
7914 /* Output a move from an fpa register to arm registers.
7915    OPERANDS[0] is the first of the three arm registers receiving the value.
7916    OPERANDS[1] is an fpa register.  */
7917 const char *
7918 output_mov_long_double_arm_from_fpa (rtx *operands)
7920 int arm_reg0 = REGNO (operands[0]);
7921 rtx ops[3];
7923 if (arm_reg0 == IP_REGNUM)
7924 abort ();
7926 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7927 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7928 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7930 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7931 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7932 return "";
7935 /* Output a move from arm registers to arm registers of a long double.
7936 OPERANDS[0] is the destination.
7937 OPERANDS[1] is the source. */
7938 const char *
7939 output_mov_long_double_arm_from_arm (rtx *operands)
7941 /* We have to be careful here because the two might overlap. */
7942 int dest_start = REGNO (operands[0]);
7943 int src_start = REGNO (operands[1]);
7944 rtx ops[2];
7945 int i;
7947 if (dest_start < src_start)
7949 for (i = 0; i < 3; i++)
7951 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7952 ops[1] = gen_rtx_REG (SImode, src_start + i);
7953 output_asm_insn ("mov%?\t%0, %1", ops);
7956 else
7958 for (i = 2; i >= 0; i--)
7960 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7961 ops[1] = gen_rtx_REG (SImode, src_start + i);
7962 output_asm_insn ("mov%?\t%0, %1", ops);
7966 return "";
7970 /* Output a move from arm registers to an fpa register.
7971    OPERANDS[0] is an fpa register.
7972    OPERANDS[1] is the first register of an arm register pair.  */
7973 const char *
7974 output_mov_double_fpa_from_arm (rtx *operands)
7976 int arm_reg0 = REGNO (operands[1]);
7977 rtx ops[2];
7979 if (arm_reg0 == IP_REGNUM)
7980 abort ();
7982 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7983 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7984 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7985 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7986 return "";
7989 /* Output a move from an fpa register to arm registers.
7990    OPERANDS[0] is the first register of an arm register pair.
7991    OPERANDS[1] is an fpa register.  */
7992 const char *
7993 output_mov_double_arm_from_fpa (rtx *operands)
7995 int arm_reg0 = REGNO (operands[0]);
7996 rtx ops[2];
7998 if (arm_reg0 == IP_REGNUM)
7999 abort ();
8001 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8002 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8003 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8004 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8005 return "";
8008 /* Output a move between double words.
8009 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8010    or MEM<-REG, and all MEMs must be offsettable addresses.  */
8011 const char *
8012 output_move_double (rtx *operands)
8014 enum rtx_code code0 = GET_CODE (operands[0]);
8015 enum rtx_code code1 = GET_CODE (operands[1]);
8016 rtx otherops[3];
8018 if (code0 == REG)
8020 int reg0 = REGNO (operands[0]);
8022 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8024 if (code1 == REG)
8026 int reg1 = REGNO (operands[1]);
8027 if (reg1 == IP_REGNUM)
8028 abort ();
8030 /* Ensure the second source is not overwritten. */
8031 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8032 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8033 else
8034 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8036 else if (code1 == CONST_VECTOR)
8038 HOST_WIDE_INT hint = 0;
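	  /* The cases below flatten the vector into two SImode immediates,
	     respecting target byte order.  For example, a little-endian
	     V4HImode vector {0x1111, 0x2222, 0x3333, 0x4444} becomes
	     0x22221111 for the low word and 0x44443333 for the high word.  */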
8040 switch (GET_MODE (operands[1]))
8042 case V2SImode:
8043 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8044 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8045 break;
8047 case V4HImode:
8048 if (BYTES_BIG_ENDIAN)
8050 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8051 hint <<= 16;
8052 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8054 else
8056 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8057 hint <<= 16;
8058 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8061 otherops[1] = GEN_INT (hint);
8062 hint = 0;
8064 if (BYTES_BIG_ENDIAN)
8066 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8067 hint <<= 16;
8068 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8070 else
8072 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8073 hint <<= 16;
8074 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8077 operands[1] = GEN_INT (hint);
8078 break;
8080 case V8QImode:
8081 if (BYTES_BIG_ENDIAN)
8083 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8084 hint <<= 8;
8085 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8086 hint <<= 8;
8087 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8088 hint <<= 8;
8089 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8091 else
8093 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8094 hint <<= 8;
8095 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8096 hint <<= 8;
8097 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8098 hint <<= 8;
8099 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8102 otherops[1] = GEN_INT (hint);
8103 hint = 0;
8105 if (BYTES_BIG_ENDIAN)
8107 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8108 hint <<= 8;
8109 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8110 hint <<= 8;
8111 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8112 hint <<= 8;
8113 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8115 else
8117 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8118 hint <<= 8;
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8120 hint <<= 8;
8121 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8122 hint <<= 8;
8123 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8126 operands[1] = GEN_INT (hint);
8127 break;
8129 default:
8130 abort ();
8132 output_mov_immediate (operands);
8133 output_mov_immediate (otherops);
8135 else if (code1 == CONST_DOUBLE)
8137 if (GET_MODE (operands[1]) == DFmode)
8139 REAL_VALUE_TYPE r;
8140 long l[2];
8142 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8143 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8144 otherops[1] = GEN_INT (l[1]);
8145 operands[1] = GEN_INT (l[0]);
8147 else if (GET_MODE (operands[1]) != VOIDmode)
8148 abort ();
8149 else if (WORDS_BIG_ENDIAN)
8151 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8152 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8154 else
8156 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8157 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8160 output_mov_immediate (operands);
8161 output_mov_immediate (otherops);
8163 else if (code1 == CONST_INT)
8165 #if HOST_BITS_PER_WIDE_INT > 32
8166 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8167 what the upper word is. */
8168 if (WORDS_BIG_ENDIAN)
8170 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8171 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8173 else
8175 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8176 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8178 #else
8179 /* Sign extend the intval into the high-order word. */
8180 if (WORDS_BIG_ENDIAN)
8182 otherops[1] = operands[1];
8183 operands[1] = (INTVAL (operands[1]) < 0
8184 ? constm1_rtx : const0_rtx);
8186 else
8187 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8188 #endif
8189 output_mov_immediate (otherops);
8190 output_mov_immediate (operands);
8192 else if (code1 == MEM)
8194 switch (GET_CODE (XEXP (operands[1], 0)))
8196 case REG:
8197 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8198 break;
8200 case PRE_INC:
8201 if (!TARGET_LDRD)
8202 abort (); /* Should never happen now. */
8203 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8204 break;
8206 case PRE_DEC:
8207 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8208 break;
8210 case POST_INC:
8211 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8212 break;
8214 case POST_DEC:
8215 if (!TARGET_LDRD)
8216 abort (); /* Should never happen now. */
8217 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8218 break;
8220 case PRE_MODIFY:
8221 case POST_MODIFY:
8222 otherops[0] = operands[0];
8223 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8224 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8226 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8228 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8230 /* Registers overlap so split out the increment. */
8231 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8232 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8234 else
8235 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8237 else
8239 /* We only allow constant increments, so this is safe. */
8240 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8242 break;
8244 case LABEL_REF:
8245 case CONST:
8246 output_asm_insn ("adr%?\t%0, %1", operands);
8247 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8248 break;
8250 default:
8251 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8252 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8254 otherops[0] = operands[0];
8255 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8256 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8258 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8260 if (GET_CODE (otherops[2]) == CONST_INT)
8262 switch ((int) INTVAL (otherops[2]))
8264 case -8:
8265 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8266 return "";
8267 case -4:
8268 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8269 return "";
8270 case 4:
8271 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8272 return "";
8275 if (TARGET_LDRD
8276 && (GET_CODE (otherops[2]) == REG
8277 || (GET_CODE (otherops[2]) == CONST_INT
8278 && INTVAL (otherops[2]) > -256
8279 && INTVAL (otherops[2]) < 256)))
8281 if (reg_overlap_mentioned_p (otherops[0],
8282 otherops[2]))
8284 /* Swap base and index registers over to
8285 avoid a conflict. */
8286 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8287 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8290 /* If both registers conflict, it will usually
8291 have been fixed by a splitter. */
8292 if (reg_overlap_mentioned_p (otherops[0],
8293 otherops[2]))
8295 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8296 output_asm_insn ("ldr%?d\t%0, [%1]",
8297 otherops);
8298 return "";
8300 else
8302 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8303 otherops);
8304 return "";
8307 if (GET_CODE (otherops[2]) == CONST_INT)
8309 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8310 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8311 else
8312 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8314 else
8315 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8317 else
8318 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8320 return "ldm%?ia\t%0, %M0";
8322 else
8324 otherops[1] = adjust_address (operands[1], SImode, 4);
8325 /* Take care of overlapping base/data reg. */
8326 if (reg_mentioned_p (operands[0], operands[1]))
8328 output_asm_insn ("ldr%?\t%0, %1", otherops);
8329 output_asm_insn ("ldr%?\t%0, %1", operands);
8331 else
8333 output_asm_insn ("ldr%?\t%0, %1", operands);
8334 output_asm_insn ("ldr%?\t%0, %1", otherops);
8339 else
8340 abort (); /* Constraints should prevent this. */
8342 else if (code0 == MEM && code1 == REG)
8344 if (REGNO (operands[1]) == IP_REGNUM)
8345 abort ();
8347 switch (GET_CODE (XEXP (operands[0], 0)))
8349 case REG:
8350 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8351 break;
8353 case PRE_INC:
8354 if (!TARGET_LDRD)
8355 abort (); /* Should never happen now. */
8356 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8357 break;
8359 case PRE_DEC:
8360 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8361 break;
8363 case POST_INC:
8364 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8365 break;
8367 case POST_DEC:
8368 if (!TARGET_LDRD)
8369 abort (); /* Should never happen now. */
8370 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8371 break;
8373 case PRE_MODIFY:
8374 case POST_MODIFY:
8375 otherops[0] = operands[1];
8376 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8377 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8379 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8380 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8381 else
8382 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8383 break;
8385 case PLUS:
8386 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8387 if (GET_CODE (otherops[2]) == CONST_INT)
8389 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8391 case -8:
8392 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8393 return "";
8395 case -4:
8396 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8397 return "";
8399 case 4:
8400 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8401 return "";
8404 if (TARGET_LDRD
8405 && (GET_CODE (otherops[2]) == REG
8406 || (GET_CODE (otherops[2]) == CONST_INT
8407 && INTVAL (otherops[2]) > -256
8408 && INTVAL (otherops[2]) < 256)))
8410 otherops[0] = operands[1];
8411 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8412 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8413 return "";
8415 /* Fall through */
8417 default:
8418 otherops[0] = adjust_address (operands[0], SImode, 4);
8419 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8420 output_asm_insn ("str%?\t%1, %0", operands);
8421 output_asm_insn ("str%?\t%1, %0", otherops);
8424 else
8425 /* Constraints should prevent this. */
8426 abort ();
8428 return "";
8432 /* Output an arbitrary MOV reg, #n.
8433 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8434 const char *
8435 output_mov_immediate (rtx *operands)
8437 HOST_WIDE_INT n = INTVAL (operands[1]);
8439 /* Try to use one MOV. */
8440 if (const_ok_for_arm (n))
8441 output_asm_insn ("mov%?\t%0, %1", operands);
8443 /* Try to use one MVN. */
8444 else if (const_ok_for_arm (~n))
8446 operands[1] = GEN_INT (~n);
8447 output_asm_insn ("mvn%?\t%0, %1", operands);
8449 else
8451 int n_ones = 0;
8452 int i;
8454 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8455 for (i = 0; i < 32; i++)
8456 if (n & 1 << i)
8457 n_ones++;
8459 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8460 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8461 else
8462 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8465 return "";
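/* Illustration only -- a hypothetical helper mirroring the test that
   const_ok_for_arm performs: an ARM data-processing immediate is an
   8-bit value rotated right by an even amount, so a 32-bit constant
   is directly encodable iff some even rotate-left of it fits in the
   low 8 bits.  A minimal sketch:  */
static int
example_is_arm_immediate (unsigned int n)
{
  int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate N left by ROT bits (ROT == 0 handled separately to
         avoid an out-of-range shift).  */
      unsigned int v = rot ? ((n << rot) | (n >> (32 - rot))) : n;

      if (v <= 0xffu)
        return 1;
    }
  return 0;
}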
8468 /* Output an ADD r, s, #n where n may be too big for one instruction.
8469 If N is zero and the source and destination registers are the same, output nothing. */
8470 const char *
8471 output_add_immediate (rtx *operands)
8473 HOST_WIDE_INT n = INTVAL (operands[2]);
8475 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8477 if (n < 0)
8478 output_multi_immediate (operands,
8479 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8480 -n);
8481 else
8482 output_multi_immediate (operands,
8483 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8484 n);
8487 return "";
8490 /* Output a multiple immediate operation.
8491 OPERANDS is the vector of operands referred to in the output patterns.
8492 INSTR1 is the output pattern to use for the first constant.
8493 INSTR2 is the output pattern to use for subsequent constants.
8494 IMMED_OP is the index of the constant slot in OPERANDS.
8495 N is the constant value. */
8496 static const char *
8497 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8498 int immed_op, HOST_WIDE_INT n)
8500 #if HOST_BITS_PER_WIDE_INT > 32
8501 n &= 0xffffffff;
8502 #endif
8504 if (n == 0)
8506 /* Quick and easy output. */
8507 operands[immed_op] = const0_rtx;
8508 output_asm_insn (instr1, operands);
8510 else
8512 int i;
8513 const char * instr = instr1;
8515 /* Note that n is never zero here (which would give no output). */
8516 for (i = 0; i < 32; i += 2)
8518 if (n & (3 << i))
8520 operands[immed_op] = GEN_INT (n & (255 << i));
8521 output_asm_insn (instr, operands);
8522 instr = instr2;
8523 i += 6;
8528 return "";
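/* Illustration only -- a hypothetical helper tracing the loop above:
   the constant is consumed in 8-bit chunks anchored at even bit
   positions, each of which is itself a valid ARM immediate, and each
   chunk becomes one MOV/ORR (or MVN/BIC) instruction.  Assumes N has
   already been reduced to 32 bits, as above.  */
static int
example_split_immediate (unsigned int n, unsigned int chunks[4])
{
  int i, count = 0;

  for (i = 0; i < 32; i += 2)
    if (n & (3u << i))
      {
        chunks[count++] = n & (255u << i);
        i += 6;  /* Skip the rest of this byte; the loop adds 2 more.  */
      }
  return count;
}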
8531 /* Return the appropriate ARM instruction for the operation code.
8532 The returned result should not be overwritten. OP is the rtx of the
8533 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8534 was shifted. */
8535 const char *
8536 arithmetic_instr (rtx op, int shift_first_arg)
8538 switch (GET_CODE (op))
8540 case PLUS:
8541 return "add";
8543 case MINUS:
8544 return shift_first_arg ? "rsb" : "sub";
8546 case IOR:
8547 return "orr";
8549 case XOR:
8550 return "eor";
8552 case AND:
8553 return "and";
8555 default:
8556 abort ();
8560 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8561 for the operation code. The returned result should not be overwritten.
8562 OP is the rtx of the shift.
8563 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8564 constant shift amount otherwise. */
8565 static const char *
8566 shift_op (rtx op, HOST_WIDE_INT *amountp)
8568 const char * mnem;
8569 enum rtx_code code = GET_CODE (op);
8571 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8572 *amountp = -1;
8573 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8574 *amountp = INTVAL (XEXP (op, 1));
8575 else
8576 abort ();
8578 switch (code)
8580 case ASHIFT:
8581 mnem = "asl";
8582 break;
8584 case ASHIFTRT:
8585 mnem = "asr";
8586 break;
8588 case LSHIFTRT:
8589 mnem = "lsr";
8590 break;
8592 case ROTATE:
8593 if (*amountp == -1)
8594 abort ();
8595 *amountp = 32 - *amountp;
8597 /* Fall through. */
8599 case ROTATERT:
8600 mnem = "ror";
8601 break;
8603 case MULT:
8604 /* We never have to worry about the amount being other than a
8605 power of 2, since this case can never be reloaded from a reg. */
8606 if (*amountp != -1)
8607 *amountp = int_log2 (*amountp);
8608 else
8609 abort ();
8610 return "asl";
8612 default:
8613 abort ();
8616 if (*amountp != -1)
8618 /* This is not 100% correct, but follows from the desire to merge
8619 multiplication by a power of 2 with the recognizer for a
8620 shift. >=32 is not a valid shift for "asl", so we must try to
8621 output a shift that produces the correct arithmetical result.
8622 Using lsr #32 is identical except for the fact that the carry bit
8623 is not set correctly if we set the flags; but we never use the
8624 carry bit from such an operation, so we can ignore that. */
8625 if (code == ROTATERT)
8626 /* Rotate is just modulo 32. */
8627 *amountp &= 31;
8628 else if (*amountp != (*amountp & 31))
8630 if (code == ASHIFT)
8631 mnem = "lsr";
8632 *amountp = 32;
8635 /* Shifts of 0 are no-ops. */
8636 if (*amountp == 0)
8637 return NULL;
8640 return mnem;
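/* Worked examples for the routine above (illustrative only):
   (rotate x 8) arrives as ROTATE with *amountp == 8 and is rewritten
   to the equivalent rotate-right, printing as "ror" with amount
   32 - 8 == 24; (mult x 8) prints as "asl" with amount
   int_log2 (8) == 3; and an over-long "asl #40" is downgraded to
   "lsr #32", which produces the same arithmetic result of zero.  */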
8643 /* Obtain the shift count from the POWER of two. */
8645 static HOST_WIDE_INT
8646 int_log2 (HOST_WIDE_INT power)
8648 HOST_WIDE_INT shift = 0;
8650 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8652 if (shift > 31)
8653 abort ();
8654 shift++;
8657 return shift;
8660 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8661 because /bin/as is horribly restrictive. The judgement about
8662 whether or not each character is 'printable' (and can be output as
8663 is) or not (and must be printed with an octal escape) must be made
8664 with reference to the *host* character set -- the situation is
8665 similar to that discussed in the comments above pp_c_char in
8666 c-pretty-print.c. */
8668 #define MAX_ASCII_LEN 51
8670 void
8671 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8673 int i;
8674 int len_so_far = 0;
8676 fputs ("\t.ascii\t\"", stream);
8678 for (i = 0; i < len; i++)
8680 int c = p[i];
8682 if (len_so_far >= MAX_ASCII_LEN)
8684 fputs ("\"\n\t.ascii\t\"", stream);
8685 len_so_far = 0;
8688 if (ISPRINT (c))
8690 if (c == '\\' || c == '\"')
8692 putc ('\\', stream);
8693 len_so_far++;
8695 putc (c, stream);
8696 len_so_far++;
8698 else
8700 fprintf (stream, "\\%03o", c);
8701 len_so_far += 4;
8705 fputs ("\"\n", stream);
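/* Illustration: for the four input bytes  a " b \n  the routine above
   emits

       .ascii "a\"b\012"

   -- printable characters pass through, quotes and backslashes are
   backslash-escaped, and non-printable bytes become three-digit octal
   escapes, with a fresh .ascii directive started every MAX_ASCII_LEN
   characters so a restrictive assembler never sees an over-long line.  */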
8708 /* Compute the register save mask for registers 0 through 12
8709 inclusive. This code is used by arm_compute_save_reg_mask. */
8710 static unsigned long
8711 arm_compute_save_reg0_reg12_mask (void)
8713 unsigned long func_type = arm_current_func_type ();
8714 unsigned int save_reg_mask = 0;
8715 unsigned int reg;
8717 if (IS_INTERRUPT (func_type))
8719 unsigned int max_reg;
8720 /* Interrupt functions must not corrupt any registers,
8721 even call clobbered ones. If this is a leaf function
8722 we can just examine the registers used by the RTL, but
8723 otherwise we have to assume that whatever function is
8724 called might clobber anything, and so we have to save
8725 all the call-clobbered registers as well. */
8726 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8727 /* FIQ handlers have registers r8 - r12 banked, so
8728 we only need to check r0 - r7. Normal ISRs only
8729 bank r13 and r14, so we must check up to r12.
8730 r13 is the stack pointer which is always preserved,
8731 so we do not need to consider it here. */
8732 max_reg = 7;
8733 else
8734 max_reg = 12;
8736 for (reg = 0; reg <= max_reg; reg++)
8737 if (regs_ever_live[reg]
8738 || (! current_function_is_leaf && call_used_regs [reg]))
8739 save_reg_mask |= (1 << reg);
8741 /* Also save the pic base register if necessary. */
8742 if (flag_pic
8743 && !TARGET_SINGLE_PIC_BASE
8744 && current_function_uses_pic_offset_table)
8745 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8747 else
8749 /* In the normal case we only need to save those registers
8750 which are call saved and which are used by this function. */
8751 for (reg = 0; reg <= 10; reg++)
8752 if (regs_ever_live[reg] && ! call_used_regs [reg])
8753 save_reg_mask |= (1 << reg);
8755 /* Handle the frame pointer as a special case. */
8756 if (! TARGET_APCS_FRAME
8757 && ! frame_pointer_needed
8758 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8759 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8760 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8762 /* If we aren't loading the PIC register,
8763 don't stack it even though it may be live. */
8764 if (flag_pic
8765 && !TARGET_SINGLE_PIC_BASE
8766 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8767 || current_function_uses_pic_offset_table))
8768 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8771 /* Save registers so the exception handler can modify them. */
8772 if (current_function_calls_eh_return)
8774 unsigned int i;
8776 for (i = 0; ; i++)
8778 reg = EH_RETURN_DATA_REGNO (i);
8779 if (reg == INVALID_REGNUM)
8780 break;
8781 save_reg_mask |= 1 << reg;
8785 return save_reg_mask;
8788 /* Compute a bit mask of which registers need to be
8789 saved on the stack for the current function. */
8791 static unsigned long
8792 arm_compute_save_reg_mask (void)
8794 unsigned int save_reg_mask = 0;
8795 unsigned long func_type = arm_current_func_type ();
8797 if (IS_NAKED (func_type))
8798 /* This should never really happen. */
8799 return 0;
8801 /* If we are creating a stack frame, then we must save the frame pointer,
8802 IP (which will hold the old stack pointer), LR and the PC. */
8803 if (frame_pointer_needed)
8804 save_reg_mask |=
8805 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8806 | (1 << IP_REGNUM)
8807 | (1 << LR_REGNUM)
8808 | (1 << PC_REGNUM);
8810 /* Volatile functions do not return, so there
8811 is no need to save any other registers. */
8812 if (IS_VOLATILE (func_type))
8813 return save_reg_mask;
8815 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8817 /* Decide if we need to save the link register.
8818 Interrupt routines have their own banked link register,
8819 so they never need to save it.
8820 Otherwise if we do not use the link register we do not need to save
8821 it. If we are pushing other registers onto the stack however, we
8822 can save an instruction in the epilogue by pushing the link register
8823 now and then popping it back into the PC. This incurs extra memory
8824 accesses though, so we only do it when optimizing for size, and only
8825 if we know that we will not need a fancy return sequence. */
8826 if (regs_ever_live [LR_REGNUM]
8827 || (save_reg_mask
8828 && optimize_size
8829 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8830 && !current_function_calls_eh_return))
8831 save_reg_mask |= 1 << LR_REGNUM;
8833 if (cfun->machine->lr_save_eliminated)
8834 save_reg_mask &= ~ (1 << LR_REGNUM);
8836 if (TARGET_REALLY_IWMMXT
8837 && ((bit_count (save_reg_mask)
8838 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8840 unsigned int reg;
8842 /* The total number of registers that are going to be pushed
8843 onto the stack is odd. We need to ensure that the stack
8844 is 64-bit aligned before we start to save iWMMXt registers,
8845 and also before we start to create locals. (A local variable
8846 might be a double or long long which we will load/store using
8847 an iWMMXt instruction). Therefore we need to push another
8848 ARM register, so that the stack will be 64-bit aligned. We
8849 try to avoid using the arg registers (r0 - r3) as they might be
8850 used to pass values in a tail call. */
8851 for (reg = 4; reg <= 12; reg++)
8852 if ((save_reg_mask & (1 << reg)) == 0)
8853 break;
8855 if (reg <= 12)
8856 save_reg_mask |= (1 << reg);
8857 else
8859 cfun->machine->sibcall_blocked = 1;
8860 save_reg_mask |= (1 << 3);
8864 return save_reg_mask;
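/* Worked example for the iWMMXt alignment fix-up above (illustrative):
   with {r4, lr} saved and one word of pretend arguments, 2 + 1 = 3
   registers' worth of stack is odd, so the first free register in the
   r4-r12 range (here r5) is added to the mask purely as padding,
   keeping the stack 64-bit aligned for the iWMMXt saves that follow.  */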
8868 /* Compute a bit mask of which registers need to be
8869 saved on the stack for the current function. */
8870 static unsigned long
8871 thumb_compute_save_reg_mask (void)
8873 unsigned long mask;
8874 int reg;
8876 mask = 0;
8877 for (reg = 0; reg < 12; reg ++)
8879 if (regs_ever_live[reg] && !call_used_regs[reg])
8880 mask |= 1 << reg;
8883 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8884 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8885 if (TARGET_SINGLE_PIC_BASE)
8886 mask &= ~(1 << arm_pic_register);
8887 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8888 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8889 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8891 /* lr will also be pushed if any lo regs are pushed. */
8892 if (mask & 0xff || thumb_force_lr_save ())
8893 mask |= (1 << LR_REGNUM);
8895 /* Make sure we have a low work register if we need one. */
8896 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
8897 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8898 mask |= 1 << LAST_LO_REGNUM;
8900 return mask;
8904 /* Return the number of bytes required to save VFP registers. */
8905 static int
8906 arm_get_vfp_saved_size (void)
8908 unsigned int regno;
8909 int count;
8910 int saved;
8912 saved = 0;
8913 /* Space for saved VFP registers. */
8914 if (TARGET_HARD_FLOAT && TARGET_VFP)
8916 count = 0;
8917 for (regno = FIRST_VFP_REGNUM;
8918 regno < LAST_VFP_REGNUM;
8919 regno += 2)
8921 if ((!regs_ever_live[regno] || call_used_regs[regno])
8922 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8924 if (count > 0)
8926 /* Work around the ARM10 VFPr1 bug. */
8927 if (count == 2 && !arm_arch6)
8928 count++;
8929 saved += count * 8 + 4;
8931 count = 0;
8933 else
8934 count++;
8936 if (count > 0)
8938 if (count == 2 && !arm_arch6)
8939 count++;
8940 saved += count * 8 + 4;
8943 return saved;
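/* Worked example for the size computation above (illustrative): one
   contiguous run of four live double-precision registers costs
   4 * 8 + 4 = 36 bytes -- 8 bytes per register plus one extra word
   consumed by the fstmx/fldmx format.  On pre-ARMv6 cores a run of
   exactly two registers is padded to three (28 rather than 20 bytes)
   to step around the ARM10 VFPr1 bug.  */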
8947 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8948 everything bar the final return instruction. */
8949 const char *
8950 output_return_instruction (rtx operand, int really_return, int reverse)
8952 char conditional[10];
8953 char instr[100];
8954 int reg;
8955 unsigned long live_regs_mask;
8956 unsigned long func_type;
8957 arm_stack_offsets *offsets;
8959 func_type = arm_current_func_type ();
8961 if (IS_NAKED (func_type))
8962 return "";
8964 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8966 /* If this function was declared non-returning, and we have
8967 found a tail call, then we have to trust that the called
8968 function won't return. */
8969 if (really_return)
8971 rtx ops[2];
8973 /* Otherwise, trap an attempted return by aborting. */
8974 ops[0] = operand;
8975 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8976 : "abort");
8977 assemble_external_libcall (ops[1]);
8978 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8981 return "";
8984 if (current_function_calls_alloca && !really_return)
8985 abort ();
8987 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8989 return_used_this_function = 1;
8991 live_regs_mask = arm_compute_save_reg_mask ();
8993 if (live_regs_mask)
8995 const char * return_reg;
8997 /* If we do not have any special requirements for function exit
8998 (e.g. interworking, or ISR) then we can load the return address
8999 directly into the PC. Otherwise we must load it into LR. */
9000 if (really_return
9001 && ! TARGET_INTERWORK)
9002 return_reg = reg_names[PC_REGNUM];
9003 else
9004 return_reg = reg_names[LR_REGNUM];
9006 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9008 /* There are three possible reasons for the IP register
9009 being saved. 1) a stack frame was created, in which case
9010 IP contains the old stack pointer, or 2) an ISR routine
9011 corrupted it, or 3) it was saved to align the stack on
9012 iWMMXt. In case 1, restore IP into SP, otherwise just
9013 restore IP. */
9014 if (frame_pointer_needed)
9016 live_regs_mask &= ~ (1 << IP_REGNUM);
9017 live_regs_mask |= (1 << SP_REGNUM);
9019 else
9021 if (! IS_INTERRUPT (func_type)
9022 && ! TARGET_REALLY_IWMMXT)
9023 abort ();
9027 /* On some ARM architectures it is faster to use LDR rather than
9028 LDM to load a single register. On other architectures, the
9029 cost is the same. In 26 bit mode, or for exception handlers,
9030 we have to use LDM to load the PC so that the CPSR is also
9031 restored. */
9032 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9034 if (live_regs_mask == (unsigned int)(1 << reg))
9035 break;
9037 if (reg <= LAST_ARM_REGNUM
9038 && (reg != LR_REGNUM
9039 || ! really_return
9040 || ! IS_INTERRUPT (func_type)))
9042 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9043 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9045 else
9047 char *p;
9048 int first = 1;
9050 /* Generate the load multiple instruction to restore the
9051 registers. Note we can get here, even if
9052 frame_pointer_needed is true, but only if sp already
9053 points to the base of the saved core registers. */
9054 if (live_regs_mask & (1 << SP_REGNUM))
9056 unsigned HOST_WIDE_INT stack_adjust;
9058 offsets = arm_get_frame_offsets ();
9059 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9060 if (stack_adjust != 0 && stack_adjust != 4)
9061 abort ();
9063 if (stack_adjust && arm_arch5)
9064 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9065 else
9067 /* If we can't use ldmib (SA110 bug), then try to pop r3
9068 instead. */
9069 if (stack_adjust)
9070 live_regs_mask |= 1 << 3;
9071 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9074 else
9075 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9077 p = instr + strlen (instr);
9079 for (reg = 0; reg <= SP_REGNUM; reg++)
9080 if (live_regs_mask & (1 << reg))
9082 int l = strlen (reg_names[reg]);
9084 if (first)
9085 first = 0;
9086 else
9088 memcpy (p, ", ", 2);
9089 p += 2;
9092 memcpy (p, "%|", 2);
9093 memcpy (p + 2, reg_names[reg], l);
9094 p += l + 2;
9097 if (live_regs_mask & (1 << LR_REGNUM))
9099 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9100 /* If returning from an interrupt, restore the CPSR. */
9101 if (IS_INTERRUPT (func_type))
9102 strcat (p, "^");
9104 else
9105 strcpy (p, "}");
9108 output_asm_insn (instr, & operand);
9110 /* See if we need to generate an extra instruction to
9111 perform the actual function return. */
9112 if (really_return
9113 && func_type != ARM_FT_INTERWORKED
9114 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9116 /* The return has already been handled
9117 by loading the LR into the PC. */
9118 really_return = 0;
9122 if (really_return)
9124 switch ((int) ARM_FUNC_TYPE (func_type))
9126 case ARM_FT_ISR:
9127 case ARM_FT_FIQ:
9128 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9129 break;
9131 case ARM_FT_INTERWORKED:
9132 sprintf (instr, "bx%s\t%%|lr", conditional);
9133 break;
9135 case ARM_FT_EXCEPTION:
9136 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9137 break;
9139 default:
9140 /* Use bx if it's available. */
9141 if (arm_arch5 || arm_arch4t)
9142 sprintf (instr, "bx%s\t%%|lr", conditional);
9143 else
9144 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9145 break;
9148 output_asm_insn (instr, & operand);
9151 return "";
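/* Illustration (register usage assumed): for a normal function that
   pushed {r4, lr} and has no special exit requirements, the code
   above prints the single instruction

       ldmfd   sp!, {r4, pc}

   -- LR's stack slot is loaded straight into the PC, folding the
   register restore and the return together.  */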
9154 /* Write the function name into the code section, directly preceding
9155 the function prologue.
9157 Code will be output similar to this:
9158 t0
9159 .ascii "arm_poke_function_name", 0
9160 .align
9161 t1
9162 .word 0xff000000 + (t1 - t0)
9163 arm_poke_function_name
9164 mov ip, sp
9165 stmfd sp!, {fp, ip, lr, pc}
9166 sub fp, ip, #4
9168 When performing a stack backtrace, code can inspect the value
9169 of 'pc' stored at 'fp' + 0. If the trace function then looks
9170 at location pc - 12 and the top 8 bits are set, then we know
9171 that there is a function name embedded immediately preceding this
9172 location, whose length is given by (pc[-3] & ~0xff000000).
9174 We assume that pc is declared as a pointer to an unsigned long.
9176 It is of no benefit to output the function name if we are assembling
9177 a leaf function. These function types will not contain a stack
9178 backtrace structure, therefore it is not possible to determine the
9179 function name. */
9180 void
9181 arm_poke_function_name (FILE *stream, const char *name)
9183 unsigned long alignlength;
9184 unsigned long length;
9185 rtx x;
9187 length = strlen (name) + 1;
9188 alignlength = ROUND_UP_WORD (length);
9190 ASM_OUTPUT_ASCII (stream, name, length);
9191 ASM_OUTPUT_ALIGN (stream, 2);
9192 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9193 assemble_aligned_integer (UNITS_PER_WORD, x);
9196 /* Place some comments into the assembler stream
9197 describing the current function. */
9198 static void
9199 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9201 unsigned long func_type;
9203 if (!TARGET_ARM)
9205 thumb_output_function_prologue (f, frame_size);
9206 return;
9209 /* Sanity check. */
9210 if (arm_ccfsm_state || arm_target_insn)
9211 abort ();
9213 func_type = arm_current_func_type ();
9215 switch ((int) ARM_FUNC_TYPE (func_type))
9217 default:
9218 case ARM_FT_NORMAL:
9219 break;
9220 case ARM_FT_INTERWORKED:
9221 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9222 break;
9223 case ARM_FT_ISR:
9224 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9225 break;
9226 case ARM_FT_FIQ:
9227 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9228 break;
9229 case ARM_FT_EXCEPTION:
9230 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9231 break;
9234 if (IS_NAKED (func_type))
9235 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9237 if (IS_VOLATILE (func_type))
9238 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9240 if (IS_NESTED (func_type))
9241 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9243 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9244 current_function_args_size,
9245 current_function_pretend_args_size, frame_size);
9247 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9248 frame_pointer_needed,
9249 cfun->machine->uses_anonymous_args);
9251 if (cfun->machine->lr_save_eliminated)
9252 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9254 if (current_function_calls_eh_return)
9255 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9257 #ifdef AOF_ASSEMBLER
9258 if (flag_pic)
9259 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9260 #endif
9262 return_used_this_function = 0;
9265 const char *
9266 arm_output_epilogue (rtx sibling)
9268 int reg;
9269 unsigned long saved_regs_mask;
9270 unsigned long func_type;
9271 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9272 frame that is $fp + 4 for a non-variadic function. */
9273 int floats_offset = 0;
9274 rtx operands[3];
9275 FILE * f = asm_out_file;
9276 unsigned int lrm_count = 0;
9277 int really_return = (sibling == NULL);
9278 int start_reg;
9279 arm_stack_offsets *offsets;
9281 /* If we have already generated the return instruction
9282 then it is futile to generate anything else. */
9283 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9284 return "";
9286 func_type = arm_current_func_type ();
9288 if (IS_NAKED (func_type))
9289 /* Naked functions don't have epilogues. */
9290 return "";
9292 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9294 rtx op;
9296 /* A volatile function should never return. Call abort. */
9297 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9298 assemble_external_libcall (op);
9299 output_asm_insn ("bl\t%a0", &op);
9301 return "";
9304 if (current_function_calls_eh_return
9305 && ! really_return)
9306 /* If we are throwing an exception, then we really must
9307 be doing a return, so we can't tail-call. */
9308 abort ();
9310 offsets = arm_get_frame_offsets ();
9311 saved_regs_mask = arm_compute_save_reg_mask ();
9313 if (TARGET_IWMMXT)
9314 lrm_count = bit_count (saved_regs_mask);
9316 floats_offset = offsets->saved_args;
9317 /* Compute how far away the floats will be. */
9318 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9319 if (saved_regs_mask & (1 << reg))
9320 floats_offset += 4;
9322 if (frame_pointer_needed)
9324 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9325 int vfp_offset = offsets->frame;
9327 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9329 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9330 if (regs_ever_live[reg] && !call_used_regs[reg])
9332 floats_offset += 12;
9333 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9334 reg, FP_REGNUM, floats_offset - vfp_offset);
9337 else
9339 start_reg = LAST_FPA_REGNUM;
9341 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9343 if (regs_ever_live[reg] && !call_used_regs[reg])
9345 floats_offset += 12;
9347 /* We can't unstack more than four registers at once. */
9348 if (start_reg - reg == 3)
9350 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9351 reg, FP_REGNUM, floats_offset - vfp_offset);
9352 start_reg = reg - 1;
9355 else
9357 if (reg != start_reg)
9358 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9359 reg + 1, start_reg - reg,
9360 FP_REGNUM, floats_offset - vfp_offset);
9361 start_reg = reg - 1;
9365 /* Just in case the last register checked also needs unstacking. */
9366 if (reg != start_reg)
9367 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9368 reg + 1, start_reg - reg,
9369 FP_REGNUM, floats_offset - vfp_offset);
9372 if (TARGET_HARD_FLOAT && TARGET_VFP)
9374 int saved_size;
9376 /* The fldmx insn does not have base+offset addressing modes,
9377 so we use IP to hold the address. */
9378 saved_size = arm_get_vfp_saved_size ();
9380 if (saved_size > 0)
9382 floats_offset += saved_size;
9383 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9384 FP_REGNUM, floats_offset - vfp_offset);
9386 start_reg = FIRST_VFP_REGNUM;
9387 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9389 if ((!regs_ever_live[reg] || call_used_regs[reg])
9390 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9392 if (start_reg != reg)
9393 arm_output_fldmx (f, IP_REGNUM,
9394 (start_reg - FIRST_VFP_REGNUM) / 2,
9395 (reg - start_reg) / 2);
9396 start_reg = reg + 2;
9399 if (start_reg != reg)
9400 arm_output_fldmx (f, IP_REGNUM,
9401 (start_reg - FIRST_VFP_REGNUM) / 2,
9402 (reg - start_reg) / 2);
9405 if (TARGET_IWMMXT)
9407 /* The frame pointer is guaranteed to be non-double-word aligned.
9408 This is because it is set to (old_stack_pointer - 4) and the
9409 old_stack_pointer was double word aligned. Thus the offset to
9410 the iWMMXt registers to be loaded must also be non-double-word
9411 sized, so that the resultant address *is* double-word aligned.
9412 We can ignore floats_offset since that was already included in
9413 the live_regs_mask. */
9414 lrm_count += (lrm_count % 2 ? 2 : 1);
9416 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9417 if (regs_ever_live[reg] && !call_used_regs[reg])
9419 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9420 reg, FP_REGNUM, lrm_count * 4);
9421 lrm_count += 2;
9425 /* saved_regs_mask should contain the IP, which at the time of stack
9426 frame generation actually contains the old stack pointer. So a
9427 quick way to unwind the stack is just pop the IP register directly
9428 into the stack pointer. */
9429 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9430 abort ();
9431 saved_regs_mask &= ~ (1 << IP_REGNUM);
9432 saved_regs_mask |= (1 << SP_REGNUM);
9434 /* There are two registers left in saved_regs_mask - LR and PC. We
9435 only need to restore the LR register (the return address), but to
9436 save time we can load it directly into the PC, unless we need a
9437 special function exit sequence, or we are not really returning. */
9438 if (really_return
9439 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9440 && !current_function_calls_eh_return)
9441 /* Delete the LR from the register mask, so that the LR on
9442 the stack is loaded into the PC in the register mask. */
9443 saved_regs_mask &= ~ (1 << LR_REGNUM);
9444 else
9445 saved_regs_mask &= ~ (1 << PC_REGNUM);
9447 /* We must use SP as the base register, because SP is one of the
9448 registers being restored. If an interrupt or page fault
9449 happens in the ldm instruction, the SP might or might not
9450 have been restored. That would be bad, as then SP will no
9451 longer indicate the safe area of stack, and we can get stack
9452 corruption. Using SP as the base register means that it will
9453 be reset correctly to the original value, should an interrupt
9454 occur. If the stack pointer already points at the right
9455 place, then omit the subtraction. */
9456 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9457 || current_function_calls_alloca)
9458 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9459 4 * bit_count (saved_regs_mask));
9460 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9462 if (IS_INTERRUPT (func_type))
9463 /* Interrupt handlers will have pushed the
9464 IP onto the stack, so restore it now. */
9465 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9467 else
9469 /* Restore stack pointer if necessary. */
9470 if (offsets->outgoing_args != offsets->saved_regs)
9472 operands[0] = operands[1] = stack_pointer_rtx;
9473 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9474 output_add_immediate (operands);
9477 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9479 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9480 if (regs_ever_live[reg] && !call_used_regs[reg])
9481 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9482 reg, SP_REGNUM);
9484 else
9486 start_reg = FIRST_FPA_REGNUM;
9488 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9490 if (regs_ever_live[reg] && !call_used_regs[reg])
9492 if (reg - start_reg == 3)
9494 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9495 start_reg, SP_REGNUM);
9496 start_reg = reg + 1;
9499 else
9501 if (reg != start_reg)
9502 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9503 start_reg, reg - start_reg,
9504 SP_REGNUM);
9506 start_reg = reg + 1;
9510 /* Just in case the last register checked also needs unstacking. */
9511 if (reg != start_reg)
9512 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9513 start_reg, reg - start_reg, SP_REGNUM);
9516 if (TARGET_HARD_FLOAT && TARGET_VFP)
9518 start_reg = FIRST_VFP_REGNUM;
9519 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9521 if ((!regs_ever_live[reg] || call_used_regs[reg])
9522 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9524 if (start_reg != reg)
9525 arm_output_fldmx (f, SP_REGNUM,
9526 (start_reg - FIRST_VFP_REGNUM) / 2,
9527 (reg - start_reg) / 2);
9528 start_reg = reg + 2;
9531 if (start_reg != reg)
9532 arm_output_fldmx (f, SP_REGNUM,
9533 (start_reg - FIRST_VFP_REGNUM) / 2,
9534 (reg - start_reg) / 2);
9536 if (TARGET_IWMMXT)
9537 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9538 if (regs_ever_live[reg] && !call_used_regs[reg])
9539 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9541 /* If we can, restore the LR into the PC. */
9542 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9543 && really_return
9544 && current_function_pretend_args_size == 0
9545 && saved_regs_mask & (1 << LR_REGNUM)
9546 && !current_function_calls_eh_return)
9548 saved_regs_mask &= ~ (1 << LR_REGNUM);
9549 saved_regs_mask |= (1 << PC_REGNUM);
9552 /* Load the registers off the stack. If we only have one register
9553 to load, use the LDR instruction - it is faster. */
9554 if (saved_regs_mask == (1 << LR_REGNUM))
9556 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9558 else if (saved_regs_mask)
9560 if (saved_regs_mask & (1 << SP_REGNUM))
9561 /* Note - write back to the stack register is not enabled
9562 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9563 in the list of registers and if we add writeback the
9564 instruction becomes UNPREDICTABLE. */
9565 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9566 else
9567 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9570 if (current_function_pretend_args_size)
9572 /* Unwind the pre-pushed regs. */
9573 operands[0] = operands[1] = stack_pointer_rtx;
9574 operands[2] = GEN_INT (current_function_pretend_args_size);
9575 output_add_immediate (operands);
9579 /* We may have already restored PC directly from the stack. */
9580 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9581 return "";
9583 /* Stack adjustment for exception handler. */
9584 if (current_function_calls_eh_return)
9585 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9586 ARM_EH_STACKADJ_REGNUM);
9588 /* Generate the return instruction. */
9589 switch ((int) ARM_FUNC_TYPE (func_type))
9591 case ARM_FT_ISR:
9592 case ARM_FT_FIQ:
9593 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9594 break;
9596 case ARM_FT_EXCEPTION:
9597 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9598 break;
9600 case ARM_FT_INTERWORKED:
9601 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9602 break;
9604 default:
9605 if (arm_arch5 || arm_arch4t)
9606 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9607 else
9608 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9609 break;
9612 return "";
9615 static void
9616 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9617 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9619 arm_stack_offsets *offsets;
9621 if (TARGET_THUMB)
9623 int regno;
9625 /* Emit any call-via-reg trampolines that are needed for v4t support
9626 of call_reg and call_value_reg type insns. */
9627 for (regno = 0; regno < SP_REGNUM; regno++)
9629 rtx label = cfun->machine->call_via[regno];
9631 if (label != NULL)
9633 function_section (current_function_decl);
9634 targetm.asm_out.internal_label (asm_out_file, "L",
9635 CODE_LABEL_NUMBER (label));
9636 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9640 /* ??? Probably not safe to set this here, since it assumes that a
9641 function will be emitted as assembly immediately after we generate
9642 RTL for it. This does not happen for inline functions. */
9643 return_used_this_function = 0;
9645 else
9647 /* We need to take into account any stack-frame rounding. */
9648 offsets = arm_get_frame_offsets ();
9650 if (use_return_insn (FALSE, NULL)
9651 && return_used_this_function
9652 && offsets->saved_regs != offsets->outgoing_args
9653 && !frame_pointer_needed)
9654 abort ();
9656 /* Reset the ARM-specific per-function variables. */
9657 after_arm_reorg = 0;
9661 /* Generate and emit an insn that we will recognize as a push_multi.
9662 Unfortunately, since this insn does not reflect the actual semantics
9663 of the operation very well, we need to annotate the insn for the benefit
9664 of DWARF2 frame unwind information. */
9665 static rtx
9666 emit_multi_reg_push (int mask)
9668 int num_regs = 0;
9669 int num_dwarf_regs;
9670 int i, j;
9671 rtx par;
9672 rtx dwarf;
9673 int dwarf_par_index;
9674 rtx tmp, reg;
9676 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9677 if (mask & (1 << i))
9678 num_regs++;
9680 if (num_regs == 0 || num_regs > 16)
9681 abort ();
9683 /* We don't record the PC in the dwarf frame information. */
9684 num_dwarf_regs = num_regs;
9685 if (mask & (1 << PC_REGNUM))
9686 num_dwarf_regs--;
9688 /* For the body of the insn we are going to generate an UNSPEC in
9689 parallel with several USEs. This allows the insn to be recognized
9690 by the push_multi pattern in the arm.md file. The insn looks
9691 something like this:
9693 (parallel [
9694 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9695 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9696 (use (reg:SI 11 fp))
9697 (use (reg:SI 12 ip))
9698 (use (reg:SI 14 lr))
9699 (use (reg:SI 15 pc))
9702 For the frame note however, we try to be more explicit and actually
9703 show each register being stored into the stack frame, plus a (single)
9704 decrement of the stack pointer. We do it this way in order to be
9705 friendly to the stack unwinding code, which only wants to see a single
9706 stack decrement per instruction. The RTL we generate for the note looks
9707 something like this:
9709 (sequence [
9710 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9711 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9712 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9713 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9714 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9717 This sequence is used both by the code that supports stack unwinding for
9718 exception handlers and by the code that generates DWARF2 frame debugging information. */
9720 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9721 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9722 dwarf_par_index = 1;
9724 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9726 if (mask & (1 << i))
9728 reg = gen_rtx_REG (SImode, i);
9730 XVECEXP (par, 0, 0)
9731 = gen_rtx_SET (VOIDmode,
9732 gen_rtx_MEM (BLKmode,
9733 gen_rtx_PRE_DEC (BLKmode,
9734 stack_pointer_rtx)),
9735 gen_rtx_UNSPEC (BLKmode,
9736 gen_rtvec (1, reg),
9737 UNSPEC_PUSH_MULT));
9739 if (i != PC_REGNUM)
9741 tmp = gen_rtx_SET (VOIDmode,
9742 gen_rtx_MEM (SImode, stack_pointer_rtx),
9743 reg);
9744 RTX_FRAME_RELATED_P (tmp) = 1;
9745 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9746 dwarf_par_index++;
9749 break;
9753 for (j = 1, i++; j < num_regs; i++)
9755 if (mask & (1 << i))
9757 reg = gen_rtx_REG (SImode, i);
9759 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9761 if (i != PC_REGNUM)
9763 tmp = gen_rtx_SET (VOIDmode,
9764 gen_rtx_MEM (SImode,
9765 plus_constant (stack_pointer_rtx,
9766 4 * j)),
9767 reg);
9768 RTX_FRAME_RELATED_P (tmp) = 1;
9769 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9772 j++;
9776 par = emit_insn (par);
9778 tmp = gen_rtx_SET (SImode,
9779 stack_pointer_rtx,
9780 gen_rtx_PLUS (SImode,
9781 stack_pointer_rtx,
9782 GEN_INT (-4 * num_regs)));
9783 RTX_FRAME_RELATED_P (tmp) = 1;
9784 XVECEXP (dwarf, 0, 0) = tmp;
9786 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9787 REG_NOTES (par));
9788 return par;
9791 static rtx
9792 emit_sfm (int base_reg, int count)
9794 rtx par;
9795 rtx dwarf;
9796 rtx tmp, reg;
9797 int i;
9799 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9800 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9802 reg = gen_rtx_REG (XFmode, base_reg++);
9804 XVECEXP (par, 0, 0)
9805 = gen_rtx_SET (VOIDmode,
9806 gen_rtx_MEM (BLKmode,
9807 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9808 gen_rtx_UNSPEC (BLKmode,
9809 gen_rtvec (1, reg),
9810 UNSPEC_PUSH_MULT));
9811 tmp = gen_rtx_SET (VOIDmode,
9812 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9813 RTX_FRAME_RELATED_P (tmp) = 1;
9814 XVECEXP (dwarf, 0, 1) = tmp;
9816 for (i = 1; i < count; i++)
9818 reg = gen_rtx_REG (XFmode, base_reg++);
9819 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9821 tmp = gen_rtx_SET (VOIDmode,
9822 gen_rtx_MEM (XFmode,
9823 plus_constant (stack_pointer_rtx,
9824 i * 12)),
9825 reg);
9826 RTX_FRAME_RELATED_P (tmp) = 1;
9827 XVECEXP (dwarf, 0, i + 1) = tmp;
9830 tmp = gen_rtx_SET (VOIDmode,
9831 stack_pointer_rtx,
9832 gen_rtx_PLUS (SImode,
9833 stack_pointer_rtx,
9834 GEN_INT (-12 * count)));
9835 RTX_FRAME_RELATED_P (tmp) = 1;
9836 XVECEXP (dwarf, 0, 0) = tmp;
9838 par = emit_insn (par);
9839 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9840 REG_NOTES (par));
9841 return par;
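/* Illustration of the note built above: for COUNT == 3 the attached
   REG_FRAME_RELATED_EXPR sequence records a single stack decrement of
   3 * 12 == 36 bytes plus three XFmode stores at sp, sp + 12 and
   sp + 24, which is the shape the unwinder wants to see, even though
   the insn itself is one sfm-style multi-store.  */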
9845 /* Return true if the current function needs to save/restore LR. */
9847 static bool
9848 thumb_force_lr_save (void)
9850 return !cfun->machine->lr_save_eliminated
9851 && (!leaf_function_p ()
9852 || thumb_far_jump_used_p ()
9853 || regs_ever_live [LR_REGNUM]);
9857 /* Compute the distance from register FROM to register TO.
9858 These can be the arg pointer (26), the soft frame pointer (25),
9859 the stack pointer (13) or the hard frame pointer (11).
9860 In Thumb mode r7 is used as the hard frame pointer, if needed.
9861 Typical stack layout looks like this:
9863 old stack pointer -> | |
9864 ----
9865 | | \
9866 | | saved arguments for
9867 | | vararg functions
9868 | | /
9870 hard FP & arg pointer -> | | \
9871 | | stack
9872 | | frame
9873 | | /
9875 | | \
9876 | | call saved
9877 | | registers
9878 soft frame pointer -> | | /
9880 | | \
9881 | | local
9882 | | variables
9883 | | /
9885 | | \
9886 | | outgoing
9887 | | arguments
9888 current stack pointer -> | | /
9891 For a given function some or all of these stack components
9892 may not be needed, giving rise to the possibility of
9893 eliminating some of the registers.
9895 The values returned by this function must reflect the behavior
9896 of arm_expand_prologue() and arm_compute_save_reg_mask().
9898 The sign of the number returned reflects the direction of stack
9899 growth, so the values are positive for all eliminations except
9900 from the soft frame pointer to the hard frame pointer.
9902 SFP may point just inside the local variables block to ensure correct
9903 alignment. */
9906 /* Calculate stack offsets. These are used to calculate register elimination
9907 offsets and in prologue/epilogue code. */
9909 static arm_stack_offsets *
9910 arm_get_frame_offsets (void)
9912 struct arm_stack_offsets *offsets;
9913 unsigned long func_type;
9914 int leaf;
9915 int saved;
9916 HOST_WIDE_INT frame_size;
9918 offsets = &cfun->machine->stack_offsets;
9920 /* We need to know if we are a leaf function. Unfortunately, it
9921 is possible to be called after start_sequence has been called,
9922 which causes get_insns to return the insns for the sequence,
9923 not the function, which will cause leaf_function_p to return
9924 the incorrect result.
9925 Fortunately, we only need
9926 to know about leaf functions once reload has completed, and the
9927 frame size cannot be changed after that time, so we can safely
9928 use the cached value. */
9930 if (reload_completed)
9931 return offsets;
9933 /* Initially this is the size of the local variables. It will be translated
9934 into an offset once we have determined the size of preceding data. */
9935 frame_size = ROUND_UP_WORD (get_frame_size ());
9937 leaf = leaf_function_p ();
9939 /* Space for variadic functions. */
9940 offsets->saved_args = current_function_pretend_args_size;
9942 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9944 if (TARGET_ARM)
9946 unsigned int regno;
9948 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9950 /* We know that SP will be doubleword aligned on entry, and we must
9951 preserve that condition at any subroutine call. We also require the
9952 soft frame pointer to be doubleword aligned. */
9954 if (TARGET_REALLY_IWMMXT)
9956 /* Check for the call-saved iWMMXt registers. */
9957 for (regno = FIRST_IWMMXT_REGNUM;
9958 regno <= LAST_IWMMXT_REGNUM;
9959 regno++)
9960 if (regs_ever_live [regno] && ! call_used_regs [regno])
9961 saved += 8;
9964 func_type = arm_current_func_type ();
9965 if (! IS_VOLATILE (func_type))
9967 /* Space for saved FPA registers. */
9968 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9969 if (regs_ever_live[regno] && ! call_used_regs[regno])
9970 saved += 12;
9972 /* Space for saved VFP registers. */
9973 if (TARGET_HARD_FLOAT && TARGET_VFP)
9974 saved += arm_get_vfp_saved_size ();
9977 else /* TARGET_THUMB */
9979 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9980 if (TARGET_BACKTRACE)
9981 saved += 16;
9984 /* Saved registers include the stack frame. */
9985 offsets->saved_regs = offsets->saved_args + saved;
9986 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
9987 /* A leaf function does not need any stack alignment if it has nothing
9988 on the stack. */
9989 if (leaf && frame_size == 0)
9991 offsets->outgoing_args = offsets->soft_frame;
9992 return offsets;
9995 /* Ensure SFP has the correct alignment. */
9996 if (ARM_DOUBLEWORD_ALIGN
9997 && (offsets->soft_frame & 7))
9998 offsets->soft_frame += 4;
10000 offsets->outgoing_args = offsets->soft_frame + frame_size
10001 + current_function_outgoing_args_size;
10003 if (ARM_DOUBLEWORD_ALIGN)
10005 /* Ensure SP remains doubleword aligned. */
10006 if (offsets->outgoing_args & 7)
10007 offsets->outgoing_args += 4;
10008 if (offsets->outgoing_args & 7)
10009 abort ();
10012 return offsets;
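/* Minimal sketch of the doubleword alignment applied above to
   soft_frame and outgoing_args (illustrative, assuming OFFSET is
   already a multiple of 4, so one pad word is always enough):  */
static int
example_align_to_doubleword (int offset)
{
  if (offset & 7)
    offset += 4;
  return offset;
}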
10016 /* Calculate the relative offsets for the different stack pointers. Positive
10017 offsets are in the direction of stack growth. */
10019 HOST_WIDE_INT
10020 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10022 arm_stack_offsets *offsets;
10024 offsets = arm_get_frame_offsets ();
10026 /* OK, now we have enough information to compute the distances.
10027 There must be an entry in these switch tables for each pair
10028 of registers in ELIMINABLE_REGS, even if some of the entries
10029 seem to be redundant or useless. */
10030 switch (from)
10032 case ARG_POINTER_REGNUM:
10033 switch (to)
10035 case THUMB_HARD_FRAME_POINTER_REGNUM:
10036 return 0;
10038 case FRAME_POINTER_REGNUM:
10039 /* This is the reverse of the soft frame pointer
10040 to hard frame pointer elimination below. */
10041 return offsets->soft_frame - offsets->saved_args;
10043 case ARM_HARD_FRAME_POINTER_REGNUM:
10044 /* If there is no stack frame then the hard
10045 frame pointer and the arg pointer coincide. */
10046 if (offsets->frame == offsets->saved_regs)
10047 return 0;
10048 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10049 return (frame_pointer_needed
10050 && cfun->static_chain_decl != NULL
10051 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10053 case STACK_POINTER_REGNUM:
10054 /* If nothing has been pushed on the stack at all
10055 then this will return -4. This *is* correct! */
10056 return offsets->outgoing_args - (offsets->saved_args + 4);
10058 default:
10059 abort ();
10061 break;
10063 case FRAME_POINTER_REGNUM:
10064 switch (to)
10066 case THUMB_HARD_FRAME_POINTER_REGNUM:
10067 return 0;
10069 case ARM_HARD_FRAME_POINTER_REGNUM:
10070 /* The hard frame pointer points to the top entry in the
10071 stack frame. The soft frame pointer points to the bottom entry
10072 in the stack frame. If there is no stack frame at all,
10073 then they are identical. */
10075 return offsets->frame - offsets->soft_frame;
10077 case STACK_POINTER_REGNUM:
10078 return offsets->outgoing_args - offsets->soft_frame;
10080 default:
10081 abort ();
10083 break;
10085 default:
10086 /* You cannot eliminate from the stack pointer.
10087 In theory you could eliminate from the hard frame
10088 pointer to the stack pointer, but this will never
10089 happen, since if a stack frame is not needed the
10090 hard frame pointer will never be used. */
10091 abort ();
10096 /* Generate the prologue instructions for entry into an ARM function. */
10097 void
10098 arm_expand_prologue (void)
10100 int reg;
10101 rtx amount;
10102 rtx insn;
10103 rtx ip_rtx;
10104 unsigned long live_regs_mask;
10105 unsigned long func_type;
10106 int fp_offset = 0;
10107 int saved_pretend_args = 0;
10108 int saved_regs = 0;
10109 unsigned HOST_WIDE_INT args_to_push;
10110 arm_stack_offsets *offsets;
10112 func_type = arm_current_func_type ();
10114 /* Naked functions don't have prologues. */
10115 if (IS_NAKED (func_type))
10116 return;
10118 /* Make a copy of current_function_pretend_args_size as we may need to modify it locally. */
10119 args_to_push = current_function_pretend_args_size;
10121 /* Compute which registers we will have to save onto the stack. */
10122 live_regs_mask = arm_compute_save_reg_mask ();
10124 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10126 if (frame_pointer_needed)
10128 if (IS_INTERRUPT (func_type))
10130 /* Interrupt functions must not corrupt any registers.
10131 Creating a frame pointer, however, corrupts the IP
10132 register, so we must push it first. */
10133 insn = emit_multi_reg_push (1 << IP_REGNUM);
10135 /* Do not set RTX_FRAME_RELATED_P on this insn.
10136 The dwarf stack unwinding code only wants to see one
10137 stack decrement per function, and this is not it. If
10138 this instruction is labeled as being part of the frame
10139 creation sequence then dwarf2out_frame_debug_expr will
10140 abort when it encounters the assignment of IP to FP
10141 later on, since the use of SP here establishes SP as
10142 the CFA register and not IP.
10144 Anyway, this instruction is not really part of the stack
10145 frame creation, although it is part of the prologue. */
10147 else if (IS_NESTED (func_type))
10149 /* The static chain register is the same as the IP register
10150 used as a scratch register during stack frame creation.
10151 To get around this we need to find somewhere to store IP
10152 whilst the frame is being created. We try the following
10153 places in order:
10155 1. The last argument register.
10156 2. A slot on the stack above the frame. (This only
10157 works if the function is not a varargs function).
10158 3. Register r3, after pushing the argument registers
10159 onto the stack.
10161 Note - we only need to tell the dwarf2 backend about the SP
10162 adjustment in the second variant; the static chain register
10163 doesn't need to be unwound, as it doesn't contain a value
10164 inherited from the caller. */
10166 if (regs_ever_live[3] == 0)
10168 insn = gen_rtx_REG (SImode, 3);
10169 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10170 insn = emit_insn (insn);
10172 else if (args_to_push == 0)
10174 rtx dwarf;
10175 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10176 insn = gen_rtx_MEM (SImode, insn);
10177 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10178 insn = emit_insn (insn);
10180 fp_offset = 4;
10182 /* Just tell the dwarf backend that we adjusted SP. */
10183 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10184 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10185 GEN_INT (-fp_offset)));
10186 RTX_FRAME_RELATED_P (insn) = 1;
10187 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10188 dwarf, REG_NOTES (insn));
10190 else
10192 /* Store the args on the stack. */
10193 if (cfun->machine->uses_anonymous_args)
10194 insn = emit_multi_reg_push
10195 ((0xf0 >> (args_to_push / 4)) & 0xf);
10196 else
10197 insn = emit_insn
10198 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10199 GEN_INT (- args_to_push)));
10201 RTX_FRAME_RELATED_P (insn) = 1;
10203 saved_pretend_args = 1;
10204 fp_offset = args_to_push;
10205 args_to_push = 0;
10207 /* Now reuse r3 to preserve IP. */
10208 insn = gen_rtx_REG (SImode, 3);
10209 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10210 (void) emit_insn (insn);
10214 if (fp_offset)
10216 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10217 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10219 else
10220 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10222 insn = emit_insn (insn);
10223 RTX_FRAME_RELATED_P (insn) = 1;
10226 if (args_to_push)
10228 /* Push the argument registers, or reserve space for them. */
10229 if (cfun->machine->uses_anonymous_args)
10230 insn = emit_multi_reg_push
10231 ((0xf0 >> (args_to_push / 4)) & 0xf);
10232 else
10233 insn = emit_insn
10234 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10235 GEN_INT (- args_to_push)));
10236 RTX_FRAME_RELATED_P (insn) = 1;
10239 /* If this is an interrupt service routine, and the link register
10240 is going to be pushed, and we are not creating a stack frame,
10241 (which would involve an extra push of IP and a pop in the epilogue)
10242 subtracting four from LR now will mean that the function return
10243 can be done with a single instruction. */
10244 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10245 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10246 && ! frame_pointer_needed)
10247 emit_insn (gen_rtx_SET (SImode,
10248 gen_rtx_REG (SImode, LR_REGNUM),
10249 gen_rtx_PLUS (SImode,
10250 gen_rtx_REG (SImode, LR_REGNUM),
10251 GEN_INT (-4))));
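/* Illustrative effect of the adjustment above: an IRQ handler's LR
   points four bytes past the return address, so the usual return is
   "subs pc, lr, #4".  Pre-decrementing LR before it is pushed lets
   the epilogue pop it straight into PC, e.g. with a single
   "ldmfd sp!, {..., pc}^", instead of popping LR and fixing it up.  */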
10253 if (live_regs_mask)
10255 insn = emit_multi_reg_push (live_regs_mask);
10256 saved_regs += bit_count (live_regs_mask) * 4;
10257 RTX_FRAME_RELATED_P (insn) = 1;
10260 if (TARGET_IWMMXT)
10261 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10262 if (regs_ever_live[reg] && ! call_used_regs [reg])
10264 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10265 insn = gen_rtx_MEM (V2SImode, insn);
10266 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10267 gen_rtx_REG (V2SImode, reg)));
10268 RTX_FRAME_RELATED_P (insn) = 1;
10269 saved_regs += 8;
10272 if (! IS_VOLATILE (func_type))
10274 int start_reg;
10276 /* Save any floating point call-saved registers used by this
10277 function. */
10278 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10280 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10281 if (regs_ever_live[reg] && !call_used_regs[reg])
10283 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10284 insn = gen_rtx_MEM (XFmode, insn);
10285 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10286 gen_rtx_REG (XFmode, reg)));
10287 RTX_FRAME_RELATED_P (insn) = 1;
10288 saved_regs += 12;
10291 else
10293 start_reg = LAST_FPA_REGNUM;
10295 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10297 if (regs_ever_live[reg] && !call_used_regs[reg])
10299 if (start_reg - reg == 3)
10301 insn = emit_sfm (reg, 4);
10302 RTX_FRAME_RELATED_P (insn) = 1;
10303 saved_regs += 48;
10304 start_reg = reg - 1;
10307 else
10309 if (start_reg != reg)
10311 insn = emit_sfm (reg + 1, start_reg - reg);
10312 RTX_FRAME_RELATED_P (insn) = 1;
10313 saved_regs += (start_reg - reg) * 12;
10315 start_reg = reg - 1;
10319 if (start_reg != reg)
10321 insn = emit_sfm (reg + 1, start_reg - reg);
10322 saved_regs += (start_reg - reg) * 12;
10323 RTX_FRAME_RELATED_P (insn) = 1;
10326 if (TARGET_HARD_FLOAT && TARGET_VFP)
10328 start_reg = FIRST_VFP_REGNUM;
10330 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10332 if ((!regs_ever_live[reg] || call_used_regs[reg])
10333 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10335 if (start_reg != reg)
10336 saved_regs += vfp_emit_fstmx (start_reg,
10337 (reg - start_reg) / 2);
10338 start_reg = reg + 2;
10341 if (start_reg != reg)
10342 saved_regs += vfp_emit_fstmx (start_reg,
10343 (reg - start_reg) / 2);
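/* Sketch of the grouping above (assumed register usage): if d8-d11
   (s16-s23) are the only call-saved VFP registers in use, the scan
   finds one contiguous run and emits a single fstmx covering four
   double-precision registers rather than four separate stores.  */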
10347 if (frame_pointer_needed)
10349 /* Create the new frame pointer. */
10350 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10351 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10352 RTX_FRAME_RELATED_P (insn) = 1;
10354 if (IS_NESTED (func_type))
10356 /* Recover the static chain register. */
10357 if (regs_ever_live [3] == 0
10358 || saved_pretend_args)
10359 insn = gen_rtx_REG (SImode, 3);
10360 else /* if (current_function_pretend_args_size == 0) */
10362 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10363 GEN_INT (4));
10364 insn = gen_rtx_MEM (SImode, insn);
10367 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10368 /* Add a USE to stop propagate_one_insn() from barfing. */
10369 emit_insn (gen_prologue_use (ip_rtx));
10373 offsets = arm_get_frame_offsets ();
10374 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10376 /* This add can produce multiple insns for a large constant, so we
10377 need to get tricky. */
10378 rtx last = get_last_insn ();
10380 amount = GEN_INT (offsets->saved_args + saved_regs
10381 - offsets->outgoing_args);
10383 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10384 amount));
10387 last = last ? NEXT_INSN (last) : get_insns ();
10388 RTX_FRAME_RELATED_P (last) = 1;
10390 while (last != insn);
10392 /* If the frame pointer is needed, emit a special barrier that
10393 will prevent the scheduler from moving stores to the frame
10394 before the stack adjustment. */
10395 if (frame_pointer_needed)
10396 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10397 hard_frame_pointer_rtx));
10401 if (flag_pic)
10402 arm_load_pic_register (INVALID_REGNUM);
10404 /* If we are profiling, make sure no instructions are scheduled before
10405 the call to mcount. Similarly if the user has requested no
10406 scheduling in the prologue. */
10407 if (current_function_profile || TARGET_NO_SCHED_PRO)
10408 emit_insn (gen_blockage ());
10410 /* If the link register is being kept alive, with the return address in it,
10411 then make sure that it does not get reused by the ce2 pass. */
10412 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10414 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10415 cfun->machine->lr_save_eliminated = 1;
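/* For reference, a minimal frame-pointer prologue produced by the
   code above looks something like this (illustrative only; nested
   and interrupt functions add the IP shuffling handled earlier):

       mov     ip, sp
       stmfd   sp!, {fp, ip, lr, pc}
       sub     fp, ip, #4
       sub     sp, sp, #LOCALS      @ LOCALS is a placeholder

   */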
10419 /* If CODE is 'd', then the X is a condition operand and the instruction
10420 should only be executed if the condition is true.
10421 if CODE is 'D', then the X is a condition operand and the instruction
10422 should only be executed if the condition is false: however, if the mode
10423 of the comparison is CCFPEmode, then always execute the instruction -- we
10424 do this because in these circumstances !GE does not necessarily imply LT;
10425 in these cases the instruction pattern will take care to make sure that
10426 an instruction containing %d will follow, thereby undoing the effects of
10427 doing this instruction unconditionally.
10428 If CODE is 'N' then X is a floating point operand that must be negated
10429 before output.
10430 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10431 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10432 void
10433 arm_print_operand (FILE *stream, rtx x, int code)
10435 switch (code)
10437 case '@':
10438 fputs (ASM_COMMENT_START, stream);
10439 return;
10441 case '_':
10442 fputs (user_label_prefix, stream);
10443 return;
10445 case '|':
10446 fputs (REGISTER_PREFIX, stream);
10447 return;
10449 case '?':
10450 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10452 if (TARGET_THUMB)
10454 output_operand_lossage ("predicated Thumb instruction");
10455 break;
10457 if (current_insn_predicate != NULL)
10459 output_operand_lossage
10460 ("predicated instruction in conditional sequence");
10461 break;
10464 fputs (arm_condition_codes[arm_current_cc], stream);
10466 else if (current_insn_predicate)
10468 enum arm_cond_code code;
10470 if (TARGET_THUMB)
10472 output_operand_lossage ("predicated Thumb instruction");
10473 break;
10476 code = get_arm_condition_code (current_insn_predicate);
10477 fputs (arm_condition_codes[code], stream);
10479 return;
10481 case 'N':
10483 REAL_VALUE_TYPE r;
10484 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10485 r = REAL_VALUE_NEGATE (r);
10486 fprintf (stream, "%s", fp_const_from_val (&r));
10488 return;
10490 case 'B':
10491 if (GET_CODE (x) == CONST_INT)
10493 HOST_WIDE_INT val;
10494 val = ARM_SIGN_EXTEND (~INTVAL (x));
10495 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10497 else
10499 putc ('~', stream);
10500 output_addr_const (stream, x);
10502 return;
10504 case 'i':
10505 fprintf (stream, "%s", arithmetic_instr (x, 1));
10506 return;
10508 /* Truncate Cirrus shift counts. */
10509 case 's':
10510 if (GET_CODE (x) == CONST_INT)
10512 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10513 return;
10515 arm_print_operand (stream, x, 0);
10516 return;
10518 case 'I':
10519 fprintf (stream, "%s", arithmetic_instr (x, 0));
10520 return;
10522 case 'S':
10524 HOST_WIDE_INT val;
10525 const char * shift = shift_op (x, &val);
10527 if (shift)
10529 fprintf (stream, ", %s ", shift);
10530 if (val == -1)
10531 arm_print_operand (stream, XEXP (x, 1), 0);
10532 else
10533 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10536 return;
10538 /* An explanation of the 'Q', 'R' and 'H' register operands:
10540 In a pair of registers containing a DI or DF value the 'Q'
10541 operand returns the register number of the register containing
10542 the least significant part of the value. The 'R' operand returns
10543 the register number of the register containing the most
10544 significant part of the value.
10546 The 'H' operand returns the higher of the two register numbers.
10547 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10548 same as the 'Q' operand, since the most significant part of the
10549 value is held in the lower number register. The reverse is true
10550 on systems where WORDS_BIG_ENDIAN is false.
10552 The purpose of these operands is to distinguish between cases
10553 where the endian-ness of the values is important (for example
10554 when they are added together), and cases where the endian-ness
10555 is irrelevant, but the order of register operations is important.
10556 For example when loading a value from memory into a register
10557 pair, the endian-ness does not matter. Provided that the value
10558 from the lower memory address is put into the lower numbered
10559 register, and the value from the higher address is put into the
10560 higher numbered register, the load will work regardless of whether
10561 the value being loaded is big-wordian or little-wordian. The
10562 order of the two register loads can matter however, if the address
10563 of the memory location is actually held in one of the registers
10564 being overwritten by the load. */
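/* Example: for a DImode value held in {r0, r1} on a target where
   WORDS_BIG_ENDIAN is false, %Q prints r0 (least significant word),
   %R prints r1 (most significant word) and %H prints r1 (the higher
   register number, whichever half it holds).  */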
10565 case 'Q':
10566 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10568 output_operand_lossage ("invalid operand for code '%c'", code);
10569 return;
10572 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10573 return;
10575 case 'R':
10576 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10578 output_operand_lossage ("invalid operand for code '%c'", code);
10579 return;
10582 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10583 return;
10585 case 'H':
10586 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10588 output_operand_lossage ("invalid operand for code '%c'", code);
10589 return;
10592 asm_fprintf (stream, "%r", REGNO (x) + 1);
10593 return;
10595 case 'm':
10596 asm_fprintf (stream, "%r",
10597 GET_CODE (XEXP (x, 0)) == REG
10598 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10599 return;
10601 case 'M':
10602 asm_fprintf (stream, "{%r-%r}",
10603 REGNO (x),
10604 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10605 return;
10607 case 'd':
10608 /* CONST_TRUE_RTX means always -- that's the default. */
10609 if (x == const_true_rtx)
10610 return;
10612 if (!COMPARISON_P (x))
10614 output_operand_lossage ("invalid operand for code '%c'", code);
10615 return;
10618 fputs (arm_condition_codes[get_arm_condition_code (x)],
10619 stream);
10620 return;
10622 case 'D':
10623 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10624 want to do that. */
10625 if (x == const_true_rtx)
10627 output_operand_lossage ("instruction never executed");
10628 return;
10630 if (!COMPARISON_P (x))
10632 output_operand_lossage ("invalid operand for code '%c'", code);
10633 return;
10636 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10637 (get_arm_condition_code (x))],
10638 stream);
10639 return;
10641 /* Cirrus registers can be accessed in a variety of ways:
10642 single floating point (f)
10643 double floating point (d)
10644 32-bit integer (fx)
10645 64-bit integer (dx). */
10646 case 'W': /* Cirrus register in F mode. */
10647 case 'X': /* Cirrus register in D mode. */
10648 case 'Y': /* Cirrus register in FX mode. */
10649 case 'Z': /* Cirrus register in DX mode. */
10650 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10651 abort ();
10653 fprintf (stream, "mv%s%s",
10654 code == 'W' ? "f"
10655 : code == 'X' ? "d"
10656 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10658 return;
10660 /* Print cirrus register in the mode specified by the register's mode. */
10661 case 'V':
10663 int mode = GET_MODE (x);
10665 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10667 output_operand_lossage ("invalid operand for code '%c'", code);
10668 return;
10671 fprintf (stream, "mv%s%s",
10672 mode == DFmode ? "d"
10673 : mode == SImode ? "fx"
10674 : mode == DImode ? "dx"
10675 : "f", reg_names[REGNO (x)] + 2);
10677 return;
10680 case 'U':
10681 if (GET_CODE (x) != REG
10682 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10683 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10684 /* Bad value for wCG register number. */
10686 output_operand_lossage ("invalid operand for code '%c'", code);
10687 return;
10690 else
10691 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10692 return;
10694 /* Print an iWMMXt control register name. */
10695 case 'w':
10696 if (GET_CODE (x) != CONST_INT
10697 || INTVAL (x) < 0
10698 || INTVAL (x) >= 16)
10699 /* Bad value for wC register number. */
10701 output_operand_lossage ("invalid operand for code '%c'", code);
10702 return;
10705 else
10707 static const char * wc_reg_names [16] =
10709 "wCID", "wCon", "wCSSF", "wCASF",
10710 "wC4", "wC5", "wC6", "wC7",
10711 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10712 "wC12", "wC13", "wC14", "wC15"
10715 fputs (wc_reg_names [INTVAL (x)], stream);
10717 return;
10719 /* Print a VFP double precision register name. */
10720 case 'P':
10722 int mode = GET_MODE (x);
10723 int num;
10725 if (mode != DImode && mode != DFmode)
10727 output_operand_lossage ("invalid operand for code '%c'", code);
10728 return;
10731 if (GET_CODE (x) != REG
10732 || !IS_VFP_REGNUM (REGNO (x)))
10734 output_operand_lossage ("invalid operand for code '%c'", code);
10735 return;
10738 num = REGNO(x) - FIRST_VFP_REGNUM;
10739 if (num & 1)
10741 output_operand_lossage ("invalid operand for code '%c'", code);
10742 return;
10745 fprintf (stream, "d%d", num >> 1);
10747 return;
10749 default:
10750 if (x == 0)
10752 output_operand_lossage ("missing operand");
10753 return;
10756 if (GET_CODE (x) == REG)
10757 asm_fprintf (stream, "%r", REGNO (x));
10758 else if (GET_CODE (x) == MEM)
10760 output_memory_reference_mode = GET_MODE (x);
10761 output_address (XEXP (x, 0));
10763 else if (GET_CODE (x) == CONST_DOUBLE)
10764 fprintf (stream, "#%s", fp_immediate_constant (x));
10765 else if (GET_CODE (x) == NEG)
10766 abort (); /* This should never happen now. */
10767 else
10769 fputc ('#', stream);
10770 output_addr_const (stream, x);
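/* Example of the 'B' code handled above: for the constant 5 it
   prints the sign-extended bitwise inverse, -6, which is the form
   wanted when an instruction takes the complemented immediate.  */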
10775 #ifndef AOF_ASSEMBLER
10776 /* Target hook for assembling integer objects. The ARM version needs to
10777 handle word-sized values specially. */
10778 static bool
10779 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10781 if (size == UNITS_PER_WORD && aligned_p)
10783 fputs ("\t.word\t", asm_out_file);
10784 output_addr_const (asm_out_file, x);
10786 /* Mark symbols as position independent. We only do this in the
10787 .text segment, not in the .data segment. */
10788 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10789 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10791 if (GET_CODE (x) == SYMBOL_REF
10792 && (CONSTANT_POOL_ADDRESS_P (x)
10793 || SYMBOL_REF_LOCAL_P (x)))
10794 fputs ("(GOTOFF)", asm_out_file);
10795 else if (GET_CODE (x) == LABEL_REF)
10796 fputs ("(GOTOFF)", asm_out_file);
10797 else
10798 fputs ("(GOT)", asm_out_file);
10800 fputc ('\n', asm_out_file);
10801 return true;
10804 if (arm_vector_mode_supported_p (GET_MODE (x)))
10806 int i, units;
10808 if (GET_CODE (x) != CONST_VECTOR)
10809 abort ();
10811 units = CONST_VECTOR_NUNITS (x);
10813 switch (GET_MODE (x))
10815 case V2SImode: size = 4; break;
10816 case V4HImode: size = 2; break;
10817 case V8QImode: size = 1; break;
10818 default:
10819 abort ();
10822 for (i = 0; i < units; i++)
10824 rtx elt;
10826 elt = CONST_VECTOR_ELT (x, i);
10827 assemble_integer
10828 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10831 return true;
10834 return default_assemble_integer (x, size, aligned_p);
10836 #endif
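/* So, for a word-sized entry emitted while building a PIC constant
   table, the hook above produces something like (illustrative):

       .word   local_sym(GOTOFF)    @ local or constant-pool symbol
       .word   global_sym(GOT)      @ anything else

   and falls back to default_assemble_integer otherwise.  */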
10838 /* A finite state machine takes care of noticing whether or not instructions
10839 can be conditionally executed, and thus decrease execution time and code
10840 size by deleting branch instructions. The fsm is controlled by
10841 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10843 /* The state of the fsm controlling condition codes are:
10844 0: normal, do nothing special
10845 1: make ASM_OUTPUT_OPCODE not output this instruction
10846 2: make ASM_OUTPUT_OPCODE not output this instruction
10847 3: make instructions conditional
10848 4: make instructions conditional
10850 State transitions (state->state by whom under condition):
10851 0 -> 1 final_prescan_insn if the `target' is a label
10852 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10853 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10854 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10855 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10856 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10857 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10858 (the target insn is arm_target_insn).
10860 If the jump clobbers the conditions then we use states 2 and 4.
10862 A similar thing can be done with conditional return insns.
10864 XXX In case the `target' is an unconditional branch, this conditionalising
10865 of the instructions always reduces code size, but not always execution
10866 time. But then, I want to reduce the code size to somewhere near what
10867 /bin/cc produces. */
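/* A minimal example of the transformation (illustrative):

       cmp  r0, #0                  cmp   r0, #0
       beq  .L1              ==>    addne r1, r1, #1
       add  r1, r1, #1
   .L1:

   The conditional branch is swallowed and the skipped insn is
   executed under the inverse condition.  */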
10869 /* Returns the index of the ARM condition code string in
10870 `arm_condition_codes'. COMPARISON should be an rtx like
10871 `(eq (...) (...))'. */
10872 static enum arm_cond_code
10873 get_arm_condition_code (rtx comparison)
10875 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10876 int code;
10877 enum rtx_code comp_code = GET_CODE (comparison);
10879 if (GET_MODE_CLASS (mode) != MODE_CC)
10880 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10881 XEXP (comparison, 1));
10883 switch (mode)
10885 case CC_DNEmode: code = ARM_NE; goto dominance;
10886 case CC_DEQmode: code = ARM_EQ; goto dominance;
10887 case CC_DGEmode: code = ARM_GE; goto dominance;
10888 case CC_DGTmode: code = ARM_GT; goto dominance;
10889 case CC_DLEmode: code = ARM_LE; goto dominance;
10890 case CC_DLTmode: code = ARM_LT; goto dominance;
10891 case CC_DGEUmode: code = ARM_CS; goto dominance;
10892 case CC_DGTUmode: code = ARM_HI; goto dominance;
10893 case CC_DLEUmode: code = ARM_LS; goto dominance;
10894 case CC_DLTUmode: code = ARM_CC;
10896 dominance:
10897 if (comp_code != EQ && comp_code != NE)
10898 abort ();
10900 if (comp_code == EQ)
10901 return ARM_INVERSE_CONDITION_CODE (code);
10902 return code;
10904 case CC_NOOVmode:
10905 switch (comp_code)
10907 case NE: return ARM_NE;
10908 case EQ: return ARM_EQ;
10909 case GE: return ARM_PL;
10910 case LT: return ARM_MI;
10911 default: abort ();
10914 case CC_Zmode:
10915 switch (comp_code)
10917 case NE: return ARM_NE;
10918 case EQ: return ARM_EQ;
10919 default: abort ();
10922 case CC_Nmode:
10923 switch (comp_code)
10925 case NE: return ARM_MI;
10926 case EQ: return ARM_PL;
10927 default: abort ();
10930 case CCFPEmode:
10931 case CCFPmode:
10932 /* These encodings assume that AC=1 in the FPA system control
10933 byte. This allows us to handle all cases except UNEQ and
10934 LTGT. */
10935 switch (comp_code)
10937 case GE: return ARM_GE;
10938 case GT: return ARM_GT;
10939 case LE: return ARM_LS;
10940 case LT: return ARM_MI;
10941 case NE: return ARM_NE;
10942 case EQ: return ARM_EQ;
10943 case ORDERED: return ARM_VC;
10944 case UNORDERED: return ARM_VS;
10945 case UNLT: return ARM_LT;
10946 case UNLE: return ARM_LE;
10947 case UNGT: return ARM_HI;
10948 case UNGE: return ARM_PL;
10949 /* UNEQ and LTGT do not have a representation. */
10950 case UNEQ: /* Fall through. */
10951 case LTGT: /* Fall through. */
10952 default: abort ();
10955 case CC_SWPmode:
10956 switch (comp_code)
10958 case NE: return ARM_NE;
10959 case EQ: return ARM_EQ;
10960 case GE: return ARM_LE;
10961 case GT: return ARM_LT;
10962 case LE: return ARM_GE;
10963 case LT: return ARM_GT;
10964 case GEU: return ARM_LS;
10965 case GTU: return ARM_CC;
10966 case LEU: return ARM_CS;
10967 case LTU: return ARM_HI;
10968 default: abort ();
10971 case CC_Cmode:
10972 switch (comp_code)
10974 case LTU: return ARM_CS;
10975 case GEU: return ARM_CC;
10976 default: abort ();
10979 case CCmode:
10980 switch (comp_code)
10982 case NE: return ARM_NE;
10983 case EQ: return ARM_EQ;
10984 case GE: return ARM_GE;
10985 case GT: return ARM_GT;
10986 case LE: return ARM_LE;
10987 case LT: return ARM_LT;
10988 case GEU: return ARM_CS;
10989 case GTU: return ARM_HI;
10990 case LEU: return ARM_LS;
10991 case LTU: return ARM_CC;
10992 default: abort ();
10995 default: abort ();
10998 abort ();
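/* Note on CC_SWPmode above: the comparison was emitted with its
   operands swapped, so each condition must be reflected rather than
   taken literally, e.g. GT prints as "lt" and GTU as "cc"
   (unsigned lower).  */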
11001 void
11002 arm_final_prescan_insn (rtx insn)
11004 /* BODY will hold the body of INSN. */
11005 rtx body = PATTERN (insn);
11007 /* This will be 1 if we are trying to repeat the trick, and things need to be
11008 reversed if the attempt appears to fail. */
11009 int reverse = 0;
11011 /* JUMP_CLOBBERS will be nonzero if the condition codes are clobbered when
11012 the branch is taken, even if the rtl suggests otherwise. It also
11013 means that we have to grub around within the jump expression to find
11014 out what the conditions are when the jump isn't taken. */
11015 int jump_clobbers = 0;
11017 /* If we start with a return insn, we only succeed if we find another one. */
11018 int seeking_return = 0;
11020 /* START_INSN will hold the insn from where we start looking. This is the
11021 first insn after the following code_label if REVERSE is true. */
11022 rtx start_insn = insn;
11024 /* If in state 4, check if the target branch is reached, in order to
11025 change back to state 0. */
11026 if (arm_ccfsm_state == 4)
11028 if (insn == arm_target_insn)
11030 arm_target_insn = NULL;
11031 arm_ccfsm_state = 0;
11033 return;
11036 /* If in state 3, it is possible to repeat the trick, if this insn is an
11037 unconditional branch to a label, and immediately following this branch
11038 is the previous target label which is only used once, and the label this
11039 branch jumps to is not too far off. */
11040 if (arm_ccfsm_state == 3)
11042 if (simplejump_p (insn))
11044 start_insn = next_nonnote_insn (start_insn);
11045 if (GET_CODE (start_insn) == BARRIER)
11047 /* XXX Isn't this always a barrier? */
11048 start_insn = next_nonnote_insn (start_insn);
11050 if (GET_CODE (start_insn) == CODE_LABEL
11051 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11052 && LABEL_NUSES (start_insn) == 1)
11053 reverse = TRUE;
11054 else
11055 return;
11057 else if (GET_CODE (body) == RETURN)
11059 start_insn = next_nonnote_insn (start_insn);
11060 if (GET_CODE (start_insn) == BARRIER)
11061 start_insn = next_nonnote_insn (start_insn);
11062 if (GET_CODE (start_insn) == CODE_LABEL
11063 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11064 && LABEL_NUSES (start_insn) == 1)
11066 reverse = TRUE;
11067 seeking_return = 1;
11069 else
11070 return;
11072 else
11073 return;
11076 if (arm_ccfsm_state != 0 && !reverse)
11077 abort ();
11078 if (GET_CODE (insn) != JUMP_INSN)
11079 return;
11081 /* This jump might be paralleled with a clobber of the condition codes;
11082 the jump should always come first. */
11083 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11084 body = XVECEXP (body, 0, 0);
11086 if (reverse
11087 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11088 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11090 int insns_skipped;
11091 int fail = FALSE, succeed = FALSE;
11092 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11093 int then_not_else = TRUE;
11094 rtx this_insn = start_insn, label = 0;
11096 /* If the jump cannot be done with one instruction, we cannot
11097 conditionally execute the instruction in the inverse case. */
11098 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11100 jump_clobbers = 1;
11101 return;
11104 /* Register the insn jumped to. */
11105 if (reverse)
11107 if (!seeking_return)
11108 label = XEXP (SET_SRC (body), 0);
11110 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11111 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11112 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11114 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11115 then_not_else = FALSE;
11117 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11118 seeking_return = 1;
11119 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11121 seeking_return = 1;
11122 then_not_else = FALSE;
11124 else
11125 abort ();
11127 /* See how many insns this branch skips, and what kind of insns. If all
11128 insns are okay, and the label or unconditional branch to the same
11129 label is not too far away, succeed. */
11130 for (insns_skipped = 0;
11131 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11133 rtx scanbody;
11135 this_insn = next_nonnote_insn (this_insn);
11136 if (!this_insn)
11137 break;
11139 switch (GET_CODE (this_insn))
11141 case CODE_LABEL:
11142 /* Succeed if it is the target label, otherwise fail since
11143 control falls in from somewhere else. */
11144 if (this_insn == label)
11146 if (jump_clobbers)
11148 arm_ccfsm_state = 2;
11149 this_insn = next_nonnote_insn (this_insn);
11151 else
11152 arm_ccfsm_state = 1;
11153 succeed = TRUE;
11155 else
11156 fail = TRUE;
11157 break;
11159 case BARRIER:
11160 /* Succeed if the following insn is the target label.
11161 Otherwise fail.
11162 If return insns are used then the last insn in a function
11163 will be a barrier. */
11164 this_insn = next_nonnote_insn (this_insn);
11165 if (this_insn && this_insn == label)
11167 if (jump_clobbers)
11169 arm_ccfsm_state = 2;
11170 this_insn = next_nonnote_insn (this_insn);
11172 else
11173 arm_ccfsm_state = 1;
11174 succeed = TRUE;
11176 else
11177 fail = TRUE;
11178 break;
11180 case CALL_INSN:
11181 /* The AAPCS says that conditional calls should not be
11182 used since they make interworking inefficient (the
11183 linker can't transform BL<cond> into BLX). That's
11184 only a problem if the machine has BLX. */
11185 if (arm_arch5)
11187 fail = TRUE;
11188 break;
11191 /* Succeed if the following insn is the target label, or
11192 if the following two insns are a barrier and the
11193 target label. */
11194 this_insn = next_nonnote_insn (this_insn);
11195 if (this_insn && GET_CODE (this_insn) == BARRIER)
11196 this_insn = next_nonnote_insn (this_insn);
11198 if (this_insn && this_insn == label
11199 && insns_skipped < max_insns_skipped)
11201 if (jump_clobbers)
11203 arm_ccfsm_state = 2;
11204 this_insn = next_nonnote_insn (this_insn);
11206 else
11207 arm_ccfsm_state = 1;
11208 succeed = TRUE;
11210 else
11211 fail = TRUE;
11212 break;
11214 case JUMP_INSN:
11215 /* If this is an unconditional branch to the same label, succeed.
11216 If it is to another label, do nothing. If it is conditional,
11217 fail. */
11218 /* XXX Probably, the tests for SET and the PC are
11219 unnecessary. */
11221 scanbody = PATTERN (this_insn);
11222 if (GET_CODE (scanbody) == SET
11223 && GET_CODE (SET_DEST (scanbody)) == PC)
11225 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11226 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11228 arm_ccfsm_state = 2;
11229 succeed = TRUE;
11231 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11232 fail = TRUE;
11234 /* Fail if a conditional return is undesirable (e.g. on a
11235 StrongARM), but still allow this if optimizing for size. */
11236 else if (GET_CODE (scanbody) == RETURN
11237 && !use_return_insn (TRUE, NULL)
11238 && !optimize_size)
11239 fail = TRUE;
11240 else if (GET_CODE (scanbody) == RETURN
11241 && seeking_return)
11243 arm_ccfsm_state = 2;
11244 succeed = TRUE;
11246 else if (GET_CODE (scanbody) == PARALLEL)
11248 switch (get_attr_conds (this_insn))
11250 case CONDS_NOCOND:
11251 break;
11252 default:
11253 fail = TRUE;
11254 break;
11257 else
11258 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11260 break;
11262 case INSN:
11263 /* Instructions using or affecting the condition codes make it
11264 fail. */
11265 scanbody = PATTERN (this_insn);
11266 if (!(GET_CODE (scanbody) == SET
11267 || GET_CODE (scanbody) == PARALLEL)
11268 || get_attr_conds (this_insn) != CONDS_NOCOND)
11269 fail = TRUE;
11271 /* A conditional Cirrus instruction must be followed by
11272 a non-Cirrus instruction. However, since we
11273 conditionalize instructions in this function, and since
11274 by the time we get here we cannot insert extra
11275 instructions (nops), shorten_branches() having already
11276 been called, we disable conditionalizing of Cirrus
11277 instructions to be safe. */
11278 if (GET_CODE (scanbody) != USE
11279 && GET_CODE (scanbody) != CLOBBER
11280 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11281 fail = TRUE;
11282 break;
11284 default:
11285 break;
11288 if (succeed)
11290 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11291 arm_target_label = CODE_LABEL_NUMBER (label);
11292 else if (seeking_return || arm_ccfsm_state == 2)
11294 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11296 this_insn = next_nonnote_insn (this_insn);
11297 if (this_insn && (GET_CODE (this_insn) == BARRIER
11298 || GET_CODE (this_insn) == CODE_LABEL))
11299 abort ();
11301 if (!this_insn)
11303 /* Oh, dear!  We ran off the end.  Give up. */
11304 recog (PATTERN (insn), insn, NULL);
11305 arm_ccfsm_state = 0;
11306 arm_target_insn = NULL;
11307 return;
11309 arm_target_insn = this_insn;
11311 else
11312 abort ();
11313 if (jump_clobbers)
11315 if (reverse)
11316 abort ();
11317 arm_current_cc =
11318 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11319 0), 0), 1));
11320 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11321 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11322 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11323 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11325 else
11327 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11328 what it was. */
11329 if (!reverse)
11330 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11331 0));
11334 if (reverse || then_not_else)
11335 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11338 /* Restore recog_data (getting the attributes of other insns can
11339 destroy this array, but final.c assumes that it remains intact
11340 across this call; since the insn has been recognized already we
11341 call recog direct). */
11342 recog (PATTERN (insn), insn, NULL);
11346 /* Returns true if REGNO is a valid register
11347 for holding a quantity of type MODE. */
11349 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11351 if (GET_MODE_CLASS (mode) == MODE_CC)
11352 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11354 if (TARGET_THUMB)
11355 /* For the Thumb we only allow values bigger than SImode in
11356 registers 0 - 6, so that there is always a second low
11357 register available to hold the upper part of the value.
11358 We probably ought to ensure that the register is the
11359 start of an even numbered register pair. */
11360 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11362 if (IS_CIRRUS_REGNUM (regno))
11363 /* We have outlawed SI values in Cirrus registers because they
11364 reside in the lower 32 bits, but SF values reside in the
11365 upper 32 bits. This causes gcc all sorts of grief. We can't
11366 even split the registers into pairs because Cirrus SI values
11367 get sign extended to 64 bits -- aldyh. */
11368 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11370 if (IS_VFP_REGNUM (regno))
11372 if (mode == SFmode || mode == SImode)
11373 return TRUE;
11375 /* DFmode values are only valid in even register pairs. */
11376 if (mode == DFmode)
11377 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11378 return FALSE;
11381 if (IS_IWMMXT_GR_REGNUM (regno))
11382 return mode == SImode;
11384 if (IS_IWMMXT_REGNUM (regno))
11385 return VALID_IWMMXT_REG_MODE (mode);
11387 /* We allow any value to be stored in the general registers.
11388 Restrict doubleword quantities to even register pairs so that we can
11389 use ldrd. */
11390 if (regno <= LAST_ARM_REGNUM)
11391 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11393 if ( regno == FRAME_POINTER_REGNUM
11394 || regno == ARG_POINTER_REGNUM)
11395 /* We only allow integers in the fake hard registers. */
11396 return GET_MODE_CLASS (mode) == MODE_INT;
11398 /* The only registers left are the FPA registers
11399 which we only allow to hold FP values. */
11400 return GET_MODE_CLASS (mode) == MODE_FLOAT
11401 && regno >= FIRST_FPA_REGNUM
11402 && regno <= LAST_FPA_REGNUM;
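/* Example of the TARGET_LDRD restriction above: a DImode value is
   accepted in {r0, r1} (even base register) but rejected in
   {r1, r2}, so doubleword values stay pair-aligned for ldrd/strd.  */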
11406 arm_regno_class (int regno)
11408 if (TARGET_THUMB)
11410 if (regno == STACK_POINTER_REGNUM)
11411 return STACK_REG;
11412 if (regno == CC_REGNUM)
11413 return CC_REG;
11414 if (regno < 8)
11415 return LO_REGS;
11416 return HI_REGS;
11419 if ( regno <= LAST_ARM_REGNUM
11420 || regno == FRAME_POINTER_REGNUM
11421 || regno == ARG_POINTER_REGNUM)
11422 return GENERAL_REGS;
11424 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11425 return NO_REGS;
11427 if (IS_CIRRUS_REGNUM (regno))
11428 return CIRRUS_REGS;
11430 if (IS_VFP_REGNUM (regno))
11431 return VFP_REGS;
11433 if (IS_IWMMXT_REGNUM (regno))
11434 return IWMMXT_REGS;
11436 if (IS_IWMMXT_GR_REGNUM (regno))
11437 return IWMMXT_GR_REGS;
11439 return FPA_REGS;
11442 /* Handle a special case when computing the offset
11443 of an argument from the frame pointer. */
11445 arm_debugger_arg_offset (int value, rtx addr)
11447 rtx insn;
11449 /* We are only interested if dbxout_parms() failed to compute the offset. */
11450 if (value != 0)
11451 return 0;
11453 /* We can only cope with the case where the address is held in a register. */
11454 if (GET_CODE (addr) != REG)
11455 return 0;
11457 /* If we are using the frame pointer to point at the argument, then
11458 an offset of 0 is correct. */
11459 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11460 return 0;
11462 /* If we are using the stack pointer to point at the
11463 argument, then an offset of 0 is correct. */
11464 if ((TARGET_THUMB || !frame_pointer_needed)
11465 && REGNO (addr) == SP_REGNUM)
11466 return 0;
11468 /* Oh dear. The argument is pointed to by a register rather
11469 than being held in a register, or being stored at a known
11470 offset from the frame pointer. Since GDB only understands
11471 those two kinds of argument we must translate the address
11472 held in the register into an offset from the frame pointer.
11473 We do this by searching through the insns for the function
11474 looking to see where this register gets its value. If the
11475 register is initialized from the frame pointer plus an offset
11476 then we are in luck and we can continue, otherwise we give up.
11478 This code is exercised by producing debugging information
11479 for a function with arguments like this:
11481 double func (double a, double b, int c, double d) {return d;}
11483 Without this code the stab for parameter 'd' will be set to
11484 an offset of 0 from the frame pointer, rather than 8. */
11486 /* The if() statement says:
11488 If the insn is a normal instruction
11489 and if the insn is setting the value in a register
11490 and if the register being set is the register holding the address of the argument
11491 and if the address is computed by an addition
11492 that involves adding to a register
11493 which is the frame pointer
11494 a constant integer
11496 then... */
11498 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11500 if ( GET_CODE (insn) == INSN
11501 && GET_CODE (PATTERN (insn)) == SET
11502 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11503 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11504 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11505 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11506 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11509 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11511 break;
11515 if (value == 0)
11517 debug_rtx (addr);
11518 warning ("unable to compute real location of stacked parameter");
11519 value = 8; /* XXX magic hack */
11522 return value;
11525 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11526 do \
11528 if ((MASK) & insn_flags) \
11529 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11530 BUILT_IN_MD, NULL, NULL_TREE); \
11532 while (0)
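/* Illustrative expansion: a call such as

       def_mbuiltin (FL_IWMMXT, "__builtin_arm_waddb",
                     v8qi_ftype_v8qi_v8qi, ARM_BUILTIN_WADDB);

   registers the builtin only when insn_flags includes FL_IWMMXT, so
   non-iWMMXt compilations never see these functions.  */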
11534 struct builtin_description
11536 const unsigned int mask;
11537 const enum insn_code icode;
11538 const char * const name;
11539 const enum arm_builtins code;
11540 const enum rtx_code comparison;
11541 const unsigned int flag;
11544 static const struct builtin_description bdesc_2arg[] =
11546 #define IWMMXT_BUILTIN(code, string, builtin) \
11547 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11548 ARM_BUILTIN_##builtin, 0, 0 },
11550 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11551 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11552 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11553 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11554 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11555 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11556 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11557 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11558 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11559 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11560 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11561 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11562 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11563 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11564 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11565 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11566 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11567 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11568 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11569 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11570 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11571 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11572 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11573 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11574 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11575 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11576 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11577 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11578 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11579 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11580 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11581 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11582 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11583 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11584 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11585 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11586 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11587 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11588 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11589 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11590 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11591 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11592 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11593 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11594 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11595 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11596 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11597 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11598 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11599 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11600 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11601 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11602 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11603 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11604 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11605 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11606 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11607 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11609 #define IWMMXT_BUILTIN2(code, builtin) \
11610 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11612 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11613 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11614 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11615 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11616 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11617 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11618 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11619 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11620 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11621 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11622 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11623 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11624 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11625 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11626 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11627 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11628 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11629 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11630 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11631 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11632 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11633 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11634 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11635 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11636 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11637 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11638 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11639 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11640 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11641 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11642 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11643 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11646 static const struct builtin_description bdesc_1arg[] =
11648 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11649 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11650 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11651 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11652 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11653 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11654 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11655 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11656 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11657 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11658 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11659 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11660 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11661 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11662 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11663 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11664 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11665 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11668 /* Set up all the iWMMXt builtins. This is
11669 not called if TARGET_IWMMXT is zero. */
11671 static void
11672 arm_init_iwmmxt_builtins (void)
11674 const struct builtin_description * d;
11675 size_t i;
11676 tree endlink = void_list_node;
11678 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11679 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11680 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11682 tree int_ftype_int
11683 = build_function_type (integer_type_node,
11684 tree_cons (NULL_TREE, integer_type_node, endlink));
11685 tree v8qi_ftype_v8qi_v8qi_int
11686 = build_function_type (V8QI_type_node,
11687 tree_cons (NULL_TREE, V8QI_type_node,
11688 tree_cons (NULL_TREE, V8QI_type_node,
11689 tree_cons (NULL_TREE,
11690 integer_type_node,
11691 endlink))));
11692 tree v4hi_ftype_v4hi_int
11693 = build_function_type (V4HI_type_node,
11694 tree_cons (NULL_TREE, V4HI_type_node,
11695 tree_cons (NULL_TREE, integer_type_node,
11696 endlink)));
11697 tree v2si_ftype_v2si_int
11698 = build_function_type (V2SI_type_node,
11699 tree_cons (NULL_TREE, V2SI_type_node,
11700 tree_cons (NULL_TREE, integer_type_node,
11701 endlink)));
11702 tree v2si_ftype_di_di
11703 = build_function_type (V2SI_type_node,
11704 tree_cons (NULL_TREE, long_long_integer_type_node,
11705 tree_cons (NULL_TREE, long_long_integer_type_node,
11706 endlink)));
11707 tree di_ftype_di_int
11708 = build_function_type (long_long_integer_type_node,
11709 tree_cons (NULL_TREE, long_long_integer_type_node,
11710 tree_cons (NULL_TREE, integer_type_node,
11711 endlink)));
11712 tree di_ftype_di_int_int
11713 = build_function_type (long_long_integer_type_node,
11714 tree_cons (NULL_TREE, long_long_integer_type_node,
11715 tree_cons (NULL_TREE, integer_type_node,
11716 tree_cons (NULL_TREE,
11717 integer_type_node,
11718 endlink))));
11719 tree int_ftype_v8qi
11720 = build_function_type (integer_type_node,
11721 tree_cons (NULL_TREE, V8QI_type_node,
11722 endlink));
11723 tree int_ftype_v4hi
11724 = build_function_type (integer_type_node,
11725 tree_cons (NULL_TREE, V4HI_type_node,
11726 endlink));
11727 tree int_ftype_v2si
11728 = build_function_type (integer_type_node,
11729 tree_cons (NULL_TREE, V2SI_type_node,
11730 endlink));
11731 tree int_ftype_v8qi_int
11732 = build_function_type (integer_type_node,
11733 tree_cons (NULL_TREE, V8QI_type_node,
11734 tree_cons (NULL_TREE, integer_type_node,
11735 endlink)));
11736 tree int_ftype_v4hi_int
11737 = build_function_type (integer_type_node,
11738 tree_cons (NULL_TREE, V4HI_type_node,
11739 tree_cons (NULL_TREE, integer_type_node,
11740 endlink)));
11741 tree int_ftype_v2si_int
11742 = build_function_type (integer_type_node,
11743 tree_cons (NULL_TREE, V2SI_type_node,
11744 tree_cons (NULL_TREE, integer_type_node,
11745 endlink)));
11746 tree v8qi_ftype_v8qi_int_int
11747 = build_function_type (V8QI_type_node,
11748 tree_cons (NULL_TREE, V8QI_type_node,
11749 tree_cons (NULL_TREE, integer_type_node,
11750 tree_cons (NULL_TREE,
11751 integer_type_node,
11752 endlink))));
11753 tree v4hi_ftype_v4hi_int_int
11754 = build_function_type (V4HI_type_node,
11755 tree_cons (NULL_TREE, V4HI_type_node,
11756 tree_cons (NULL_TREE, integer_type_node,
11757 tree_cons (NULL_TREE,
11758 integer_type_node,
11759 endlink))));
11760 tree v2si_ftype_v2si_int_int
11761 = build_function_type (V2SI_type_node,
11762 tree_cons (NULL_TREE, V2SI_type_node,
11763 tree_cons (NULL_TREE, integer_type_node,
11764 tree_cons (NULL_TREE,
11765 integer_type_node,
11766 endlink))));
11767 /* Miscellaneous. */
11768 tree v8qi_ftype_v4hi_v4hi
11769 = build_function_type (V8QI_type_node,
11770 tree_cons (NULL_TREE, V4HI_type_node,
11771 tree_cons (NULL_TREE, V4HI_type_node,
11772 endlink)));
11773 tree v4hi_ftype_v2si_v2si
11774 = build_function_type (V4HI_type_node,
11775 tree_cons (NULL_TREE, V2SI_type_node,
11776 tree_cons (NULL_TREE, V2SI_type_node,
11777 endlink)));
11778 tree v2si_ftype_v4hi_v4hi
11779 = build_function_type (V2SI_type_node,
11780 tree_cons (NULL_TREE, V4HI_type_node,
11781 tree_cons (NULL_TREE, V4HI_type_node,
11782 endlink)));
11783 tree v2si_ftype_v8qi_v8qi
11784 = build_function_type (V2SI_type_node,
11785 tree_cons (NULL_TREE, V8QI_type_node,
11786 tree_cons (NULL_TREE, V8QI_type_node,
11787 endlink)));
11788 tree v4hi_ftype_v4hi_di
11789 = build_function_type (V4HI_type_node,
11790 tree_cons (NULL_TREE, V4HI_type_node,
11791 tree_cons (NULL_TREE,
11792 long_long_integer_type_node,
11793 endlink)));
11794 tree v2si_ftype_v2si_di
11795 = build_function_type (V2SI_type_node,
11796 tree_cons (NULL_TREE, V2SI_type_node,
11797 tree_cons (NULL_TREE,
11798 long_long_integer_type_node,
11799 endlink)));
11800 tree void_ftype_int_int
11801 = build_function_type (void_type_node,
11802 tree_cons (NULL_TREE, integer_type_node,
11803 tree_cons (NULL_TREE, integer_type_node,
11804 endlink)));
11805 tree di_ftype_void
11806 = build_function_type (long_long_unsigned_type_node, endlink);
11807 tree di_ftype_v8qi
11808 = build_function_type (long_long_integer_type_node,
11809 tree_cons (NULL_TREE, V8QI_type_node,
11810 endlink));
11811 tree di_ftype_v4hi
11812 = build_function_type (long_long_integer_type_node,
11813 tree_cons (NULL_TREE, V4HI_type_node,
11814 endlink));
11815 tree di_ftype_v2si
11816 = build_function_type (long_long_integer_type_node,
11817 tree_cons (NULL_TREE, V2SI_type_node,
11818 endlink));
11819 tree v2si_ftype_v4hi
11820 = build_function_type (V2SI_type_node,
11821 tree_cons (NULL_TREE, V4HI_type_node,
11822 endlink));
11823 tree v4hi_ftype_v8qi
11824 = build_function_type (V4HI_type_node,
11825 tree_cons (NULL_TREE, V8QI_type_node,
11826 endlink));
11828 tree di_ftype_di_v4hi_v4hi
11829 = build_function_type (long_long_unsigned_type_node,
11830 tree_cons (NULL_TREE,
11831 long_long_unsigned_type_node,
11832 tree_cons (NULL_TREE, V4HI_type_node,
11833 tree_cons (NULL_TREE,
11834 V4HI_type_node,
11835 endlink))));
11837 tree di_ftype_v4hi_v4hi
11838 = build_function_type (long_long_unsigned_type_node,
11839 tree_cons (NULL_TREE, V4HI_type_node,
11840 tree_cons (NULL_TREE, V4HI_type_node,
11841 endlink)));
11843 /* Normal vector binops. */
11844 tree v8qi_ftype_v8qi_v8qi
11845 = build_function_type (V8QI_type_node,
11846 tree_cons (NULL_TREE, V8QI_type_node,
11847 tree_cons (NULL_TREE, V8QI_type_node,
11848 endlink)));
11849 tree v4hi_ftype_v4hi_v4hi
11850 = build_function_type (V4HI_type_node,
11851 tree_cons (NULL_TREE, V4HI_type_node,
11852 tree_cons (NULL_TREE, V4HI_type_node,
11853 endlink)));
11854 tree v2si_ftype_v2si_v2si
11855 = build_function_type (V2SI_type_node,
11856 tree_cons (NULL_TREE, V2SI_type_node,
11857 tree_cons (NULL_TREE, V2SI_type_node,
11858 endlink)));
11859 tree di_ftype_di_di
11860 = build_function_type (long_long_unsigned_type_node,
11861 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11862 tree_cons (NULL_TREE,
11863 long_long_unsigned_type_node,
11864 endlink)));
11866 /* Add all builtins that are more or less simple operations on two
11867 operands. */
11868 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11870 /* Use one of the operands; the target can have a different mode for
11871 mask-generating compares. */
11872 enum machine_mode mode;
11873 tree type;
11875 if (d->name == 0)
11876 continue;
11878 mode = insn_data[d->icode].operand[1].mode;
11880 switch (mode)
11882 case V8QImode:
11883 type = v8qi_ftype_v8qi_v8qi;
11884 break;
11885 case V4HImode:
11886 type = v4hi_ftype_v4hi_v4hi;
11887 break;
11888 case V2SImode:
11889 type = v2si_ftype_v2si_v2si;
11890 break;
11891 case DImode:
11892 type = di_ftype_di_di;
11893 break;
11895 default:
11896 abort ();
11899 def_mbuiltin (d->mask, d->name, type, d->code);
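/* E.g. the "waddb" entry in bdesc_2arg has operand mode V8QImode,
   so the loop above registers __builtin_arm_waddb with the type
   v8qi_ftype_v8qi_v8qi.  */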
11902 /* Add the remaining MMX insns with somewhat more complicated types. */
11903 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11904 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11905 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11907 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11908 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11909 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11910 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11911 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11912 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11914 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11915 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11916 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11917 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11918 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11919 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11921 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11922 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11923 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11924 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11925 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11926 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11928 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11929 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11930 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11931 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11932 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11933 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11937 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11938 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11939 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11942 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11946 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11953 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
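/* For illustration only (not part of the build): a hedged sketch of how
   the builtins registered above might surface in user code compiled for
   an iWMMXt-enabled target.  The vector typedef below is illustrative;
   real user code would normally use the types from the target's
   mmintrin.h instead.  */
#if 0
typedef short example_v4hi __attribute__ ((vector_size (8)));

static example_v4hi
example_shift_halfwords (example_v4hi x)
{
  /* WSLLHI shifts each of the four halfwords left by an immediate.  */
  return __builtin_arm_wsllhi (x, 2);
}

static long long
example_zero (void)
{
  /* WZERO takes no operands and yields an all-zero 64-bit value.  */
  return __builtin_arm_wzero ();
}
#endif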
11994 static void
11995 arm_init_builtins (void)
11997 if (TARGET_REALLY_IWMMXT)
11998 arm_init_iwmmxt_builtins ();
12001 /* Errors in the source file can cause expand_expr to return const0_rtx
12002 where we expect a vector. To avoid crashing, use one of the vector
12003 clear instructions. */
12005 static rtx
12006 safe_vector_operand (rtx x, enum machine_mode mode)
12008 if (x != const0_rtx)
12009 return x;
12010 x = gen_reg_rtx (mode);
12012 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12013 : gen_rtx_SUBREG (DImode, x, 0)));
12014 return x;
12017 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12019 static rtx
12020 arm_expand_binop_builtin (enum insn_code icode,
12021 tree arglist, rtx target)
12023 rtx pat;
12024 tree arg0 = TREE_VALUE (arglist);
12025 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12026 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12027 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12028 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12029 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12030 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12032 if (VECTOR_MODE_P (mode0))
12033 op0 = safe_vector_operand (op0, mode0);
12034 if (VECTOR_MODE_P (mode1))
12035 op1 = safe_vector_operand (op1, mode1);
12037 if (! target
12038 || GET_MODE (target) != tmode
12039 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12040 target = gen_reg_rtx (tmode);
12042 /* In case the insn wants input operands in modes different from
12043 the result, abort. */
12044 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12045 abort ();
12047 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12048 op0 = copy_to_mode_reg (mode0, op0);
12049 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12050 op1 = copy_to_mode_reg (mode1, op1);
12052 pat = GEN_FCN (icode) (target, op0, op1);
12053 if (! pat)
12054 return 0;
12055 emit_insn (pat);
12056 return target;
12059 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12061 static rtx
12062 arm_expand_unop_builtin (enum insn_code icode,
12063 tree arglist, rtx target, int do_load)
12065 rtx pat;
12066 tree arg0 = TREE_VALUE (arglist);
12067 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12068 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12069 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12071 if (! target
12072 || GET_MODE (target) != tmode
12073 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12074 target = gen_reg_rtx (tmode);
12075 if (do_load)
12076 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12077 else
12079 if (VECTOR_MODE_P (mode0))
12080 op0 = safe_vector_operand (op0, mode0);
12082 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12083 op0 = copy_to_mode_reg (mode0, op0);
12086 pat = GEN_FCN (icode) (target, op0);
12087 if (! pat)
12088 return 0;
12089 emit_insn (pat);
12090 return target;
12093 /* Expand an expression EXP that calls a built-in function,
12094 with result going to TARGET if that's convenient
12095 (and in mode MODE if that's convenient).
12096 SUBTARGET may be used as the target for computing one of EXP's operands.
12097 IGNORE is nonzero if the value is to be ignored. */
12099 static rtx
12100 arm_expand_builtin (tree exp,
12101 rtx target,
12102 rtx subtarget ATTRIBUTE_UNUSED,
12103 enum machine_mode mode ATTRIBUTE_UNUSED,
12104 int ignore ATTRIBUTE_UNUSED)
12106 const struct builtin_description * d;
12107 enum insn_code icode;
12108 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12109 tree arglist = TREE_OPERAND (exp, 1);
12110 tree arg0;
12111 tree arg1;
12112 tree arg2;
12113 rtx op0;
12114 rtx op1;
12115 rtx op2;
12116 rtx pat;
12117 int fcode = DECL_FUNCTION_CODE (fndecl);
12118 size_t i;
12119 enum machine_mode tmode;
12120 enum machine_mode mode0;
12121 enum machine_mode mode1;
12122 enum machine_mode mode2;
12124 switch (fcode)
12126 case ARM_BUILTIN_TEXTRMSB:
12127 case ARM_BUILTIN_TEXTRMUB:
12128 case ARM_BUILTIN_TEXTRMSH:
12129 case ARM_BUILTIN_TEXTRMUH:
12130 case ARM_BUILTIN_TEXTRMSW:
12131 case ARM_BUILTIN_TEXTRMUW:
12132 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12133 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12134 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12135 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12136 : CODE_FOR_iwmmxt_textrmw);
12138 arg0 = TREE_VALUE (arglist);
12139 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12140 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12141 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12142 tmode = insn_data[icode].operand[0].mode;
12143 mode0 = insn_data[icode].operand[1].mode;
12144 mode1 = insn_data[icode].operand[2].mode;
12146 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12147 op0 = copy_to_mode_reg (mode0, op0);
12148 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12150 /* @@@ better error message */
12151 error ("selector must be an immediate");
12152 return gen_reg_rtx (tmode);
12154 if (target == 0
12155 || GET_MODE (target) != tmode
12156 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12157 target = gen_reg_rtx (tmode);
12158 pat = GEN_FCN (icode) (target, op0, op1);
12159 if (! pat)
12160 return 0;
12161 emit_insn (pat);
12162 return target;
12164 case ARM_BUILTIN_TINSRB:
12165 case ARM_BUILTIN_TINSRH:
12166 case ARM_BUILTIN_TINSRW:
12167 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12168 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12169 : CODE_FOR_iwmmxt_tinsrw);
12170 arg0 = TREE_VALUE (arglist);
12171 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12172 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12173 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12174 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12175 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12176 tmode = insn_data[icode].operand[0].mode;
12177 mode0 = insn_data[icode].operand[1].mode;
12178 mode1 = insn_data[icode].operand[2].mode;
12179 mode2 = insn_data[icode].operand[3].mode;
12181 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12182 op0 = copy_to_mode_reg (mode0, op0);
12183 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12184 op1 = copy_to_mode_reg (mode1, op1);
12185 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12187 /* @@@ better error message */
12188 error ("selector must be an immediate");
12189 return const0_rtx;
12191 if (target == 0
12192 || GET_MODE (target) != tmode
12193 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12194 target = gen_reg_rtx (tmode);
12195 pat = GEN_FCN (icode) (target, op0, op1, op2);
12196 if (! pat)
12197 return 0;
12198 emit_insn (pat);
12199 return target;
12201 case ARM_BUILTIN_SETWCX:
12202 arg0 = TREE_VALUE (arglist);
12203 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12204 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12205 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12206 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12207 return 0;
12209 case ARM_BUILTIN_GETWCX:
12210 arg0 = TREE_VALUE (arglist);
12211 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12212 target = gen_reg_rtx (SImode);
12213 emit_insn (gen_iwmmxt_tmrc (target, op0));
12214 return target;
12216 case ARM_BUILTIN_WSHUFH:
12217 icode = CODE_FOR_iwmmxt_wshufh;
12218 arg0 = TREE_VALUE (arglist);
12219 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12220 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12221 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12222 tmode = insn_data[icode].operand[0].mode;
12223 mode1 = insn_data[icode].operand[1].mode;
12224 mode2 = insn_data[icode].operand[2].mode;
12226 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12227 op0 = copy_to_mode_reg (mode1, op0);
12228 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12230 /* @@@ better error message */
12231 error ("mask must be an immediate");
12232 return const0_rtx;
12234 if (target == 0
12235 || GET_MODE (target) != tmode
12236 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12237 target = gen_reg_rtx (tmode);
12238 pat = GEN_FCN (icode) (target, op0, op1);
12239 if (! pat)
12240 return 0;
12241 emit_insn (pat);
12242 return target;
12244 case ARM_BUILTIN_WSADB:
12245 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12246 case ARM_BUILTIN_WSADH:
12247 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12248 case ARM_BUILTIN_WSADBZ:
12249 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12250 case ARM_BUILTIN_WSADHZ:
12251 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12253 /* Several three-argument builtins. */
12254 case ARM_BUILTIN_WMACS:
12255 case ARM_BUILTIN_WMACU:
12256 case ARM_BUILTIN_WALIGN:
12257 case ARM_BUILTIN_TMIA:
12258 case ARM_BUILTIN_TMIAPH:
12259 case ARM_BUILTIN_TMIATT:
12260 case ARM_BUILTIN_TMIATB:
12261 case ARM_BUILTIN_TMIABT:
12262 case ARM_BUILTIN_TMIABB:
12263 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12264 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12265 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12266 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12267 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12268 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12269 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12270 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12271 : CODE_FOR_iwmmxt_walign);
12272 arg0 = TREE_VALUE (arglist);
12273 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12274 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12275 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12276 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12277 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12278 tmode = insn_data[icode].operand[0].mode;
12279 mode0 = insn_data[icode].operand[1].mode;
12280 mode1 = insn_data[icode].operand[2].mode;
12281 mode2 = insn_data[icode].operand[3].mode;
12283 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12284 op0 = copy_to_mode_reg (mode0, op0);
12285 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12286 op1 = copy_to_mode_reg (mode1, op1);
12287 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12288 op2 = copy_to_mode_reg (mode2, op2);
12289 if (target == 0
12290 || GET_MODE (target) != tmode
12291 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12292 target = gen_reg_rtx (tmode);
12293 pat = GEN_FCN (icode) (target, op0, op1, op2);
12294 if (! pat)
12295 return 0;
12296 emit_insn (pat);
12297 return target;
12299 case ARM_BUILTIN_WZERO:
12300 target = gen_reg_rtx (DImode);
12301 emit_insn (gen_iwmmxt_clrdi (target));
12302 return target;
12304 default:
12305 break;
12308 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12309 if (d->code == (const enum arm_builtins) fcode)
12310 return arm_expand_binop_builtin (d->icode, arglist, target);
12312 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12313 if (d->code == (const enum arm_builtins) fcode)
12314 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12316 /* @@@ Should really do something sensible here. */
12317 return NULL_RTX;
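/* For illustration only (not part of the build): the TEXTRM, TINSR and
   WSHUFH cases above demand a compile-time constant selector, so of the
   two hypothetical calls below only the first expands cleanly; the
   second hits the "selector must be an immediate" error path.  The
   vector typedef is again illustrative.  */
#if 0
typedef short example_v4hi __attribute__ ((vector_size (8)));

static int
example_lane_ok (example_v4hi x)
{
  return __builtin_arm_textrmsh (x, 2);	/* Constant selector: OK.  */
}

static int
example_lane_bad (example_v4hi x, int i)
{
  return __builtin_arm_textrmsh (x, i);	/* Rejected at expand time.  */
}
#endif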
12320 /* Recursively search through all of the blocks in a function
12321 checking to see if any of the variables created in that
12322 function match the RTX called 'orig'. If they do then
12323 replace them with the RTX called 'new'. */
12324 static void
12325 replace_symbols_in_block (tree block, rtx orig, rtx new)
12327 for (; block; block = BLOCK_CHAIN (block))
12329 tree sym;
12331 if (!TREE_USED (block))
12332 continue;
12334 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12336 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12337 || DECL_IGNORED_P (sym)
12338 || TREE_CODE (sym) != VAR_DECL
12339 || DECL_EXTERNAL (sym)
12340 || !rtx_equal_p (DECL_RTL (sym), orig)
12342 continue;
12344 SET_DECL_RTL (sym, new);
12347 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12351 /* Return the number (counting from 0) of
12352 the least significant set bit in MASK. */
12354 inline static int
12355 number_of_first_bit_set (int mask)
12357 int bit;
12359 for (bit = 0;
12360 (mask & (1 << bit)) == 0;
12361 ++bit)
12362 continue;
12364 return bit;
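/* For illustration only (not part of the build): for a nonzero MASK the
   loop above computes the same value as GCC's count-trailing-zeros
   builtin; like the loop, the builtin is undefined for a zero mask.  */
#if 0
static int
example_first_bit_set (int mask)
{
  return __builtin_ctz ((unsigned int) mask);
}
#endif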
12367 /* Generate code to return from a thumb function.
12368 If 'reg_containing_return_addr' is -1, then the return address is
12369 actually on the stack, at the stack pointer. */
12370 static void
12371 thumb_exit (FILE *f, int reg_containing_return_addr)
12373 unsigned regs_available_for_popping;
12374 unsigned regs_to_pop;
12375 int pops_needed;
12376 unsigned available;
12377 unsigned required;
12378 int mode;
12379 int size;
12380 int restore_a4 = FALSE;
12382 /* Compute the registers we need to pop. */
12383 regs_to_pop = 0;
12384 pops_needed = 0;
12386 if (reg_containing_return_addr == -1)
12388 regs_to_pop |= 1 << LR_REGNUM;
12389 ++pops_needed;
12392 if (TARGET_BACKTRACE)
12394 /* Restore the (ARM) frame pointer and stack pointer. */
12395 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12396 pops_needed += 2;
12399 /* If there is nothing to pop then just emit the BX instruction and
12400 return. */
12401 if (pops_needed == 0)
12403 if (current_function_calls_eh_return)
12404 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12406 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12407 return;
12409 /* Otherwise, if we are not supporting interworking, have not created
12410 a backtrace structure, and the function was not entered in ARM mode,
12411 then just pop the return address straight into the PC. */
12412 else if (!TARGET_INTERWORK
12413 && !TARGET_BACKTRACE
12414 && !is_called_in_ARM_mode (current_function_decl)
12415 && !current_function_calls_eh_return)
12417 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12418 return;
12421 /* Find out how many of the (return) argument registers we can corrupt. */
12422 regs_available_for_popping = 0;
12424 /* If returning via __builtin_eh_return, the bottom three registers
12425 all contain information needed for the return. */
12426 if (current_function_calls_eh_return)
12427 size = 12;
12428 else
12430 /* We can deduce the registers used from the function's
12431 return value. This is more reliable than examining
12432 regs_ever_live[] because that will be set if the register is
12433 ever used in the function, not just if the register is used
12434 to hold a return value. */
12436 if (current_function_return_rtx != 0)
12437 mode = GET_MODE (current_function_return_rtx);
12438 else
12439 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12441 size = GET_MODE_SIZE (mode);
12443 if (size == 0)
12445 /* In a void function we can use any argument register.
12446 In a function that returns a structure on the stack
12447 we can use the second and third argument registers. */
12448 if (mode == VOIDmode)
12449 regs_available_for_popping =
12450 (1 << ARG_REGISTER (1))
12451 | (1 << ARG_REGISTER (2))
12452 | (1 << ARG_REGISTER (3));
12453 else
12454 regs_available_for_popping =
12455 (1 << ARG_REGISTER (2))
12456 | (1 << ARG_REGISTER (3));
12458 else if (size <= 4)
12459 regs_available_for_popping =
12460 (1 << ARG_REGISTER (2))
12461 | (1 << ARG_REGISTER (3));
12462 else if (size <= 8)
12463 regs_available_for_popping =
12464 (1 << ARG_REGISTER (3));
12467 /* Match registers to be popped with registers into which we pop them. */
12468 for (available = regs_available_for_popping,
12469 required = regs_to_pop;
12470 required != 0 && available != 0;
12471 available &= ~(available & - available),
12472 required &= ~(required & - required))
12473 -- pops_needed;
12475 /* If we have any popping registers left over, remove them. */
12476 if (available > 0)
12477 regs_available_for_popping &= ~available;
12479 /* Otherwise if we need another popping register we can use
12480 the fourth argument register. */
12481 else if (pops_needed)
12483 /* If we have not found any free argument registers and
12484 reg a4 contains the return address, we must move it. */
12485 if (regs_available_for_popping == 0
12486 && reg_containing_return_addr == LAST_ARG_REGNUM)
12488 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12489 reg_containing_return_addr = LR_REGNUM;
12491 else if (size > 12)
12493 /* Register a4 is being used to hold part of the return value,
12494 but we have dire need of a free, low register. */
12495 restore_a4 = TRUE;
12497 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12500 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12502 /* The fourth argument register is available. */
12503 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12505 --pops_needed;
12509 /* Pop as many registers as we can. */
12510 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12511 regs_available_for_popping);
12513 /* Process the registers we popped. */
12514 if (reg_containing_return_addr == -1)
12516 /* The return address was popped into the lowest numbered register. */
12517 regs_to_pop &= ~(1 << LR_REGNUM);
12519 reg_containing_return_addr =
12520 number_of_first_bit_set (regs_available_for_popping);
12522 /* Remove this register from the mask of available registers, so that
12523 the return address will not be corrupted by further pops. */
12524 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12527 /* If we popped other registers then handle them here. */
12528 if (regs_available_for_popping)
12530 int frame_pointer;
12532 /* Work out which register currently contains the frame pointer. */
12533 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12535 /* Move it into the correct place. */
12536 asm_fprintf (f, "\tmov\t%r, %r\n",
12537 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12539 /* (Temporarily) remove it from the mask of popped registers. */
12540 regs_available_for_popping &= ~(1 << frame_pointer);
12541 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12543 if (regs_available_for_popping)
12545 int stack_pointer;
12547 /* We popped the stack pointer as well;
12548 find the register that contains it. */
12549 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12551 /* Move it into the stack register. */
12552 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12554 /* At this point we have popped all necessary registers, so
12555 do not worry about restoring regs_available_for_popping
12556 to its correct value:
12558 assert (pops_needed == 0)
12559 assert (regs_available_for_popping == (1 << frame_pointer))
12560 assert (regs_to_pop == (1 << STACK_POINTER)) */
12562 else
12564 /* Since we have just moved the popped value into the frame
12565 pointer, the popping register is available for reuse, and
12566 we know that we still have the stack pointer left to pop. */
12567 regs_available_for_popping |= (1 << frame_pointer);
12571 /* If we still have registers left on the stack, but we no longer have
12572 any registers into which we can pop them, then we must move the return
12573 address into the link register and make available the register that
12574 contained it. */
12575 if (regs_available_for_popping == 0 && pops_needed > 0)
12577 regs_available_for_popping |= 1 << reg_containing_return_addr;
12579 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12580 reg_containing_return_addr);
12582 reg_containing_return_addr = LR_REGNUM;
12585 /* If we have registers left on the stack then pop some more.
12586 We know that at most we will want to pop FP and SP. */
12587 if (pops_needed > 0)
12589 int popped_into;
12590 int move_to;
12592 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12593 regs_available_for_popping);
12595 /* We have popped either FP or SP.
12596 Move whichever one it is into the correct register. */
12597 popped_into = number_of_first_bit_set (regs_available_for_popping);
12598 move_to = number_of_first_bit_set (regs_to_pop);
12600 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12602 regs_to_pop &= ~(1 << move_to);
12604 --pops_needed;
12607 /* If we still have not popped everything then we must have only
12608 had one register available to us and we are now popping the SP. */
12609 if (pops_needed > 0)
12611 int popped_into;
12613 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12614 regs_available_for_popping);
12616 popped_into = number_of_first_bit_set (regs_available_for_popping);
12618 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12619 /*
12620 assert (regs_to_pop == (1 << STACK_POINTER))
12621 assert (pops_needed == 1)
12622 */
12625 /* If necessary restore the a4 register. */
12626 if (restore_a4)
12628 if (reg_containing_return_addr != LR_REGNUM)
12630 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12631 reg_containing_return_addr = LR_REGNUM;
12634 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12637 if (current_function_calls_eh_return)
12638 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12640 /* Return to caller. */
12641 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
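/* For illustration only (not part of the build): the register-matching
   loop above pairs off registers by repeatedly clearing the lowest set
   bit of each mask; x & -x isolates that bit.  */
#if 0
static unsigned int
example_clear_lowest_set_bit (unsigned int x)
{
  return x & ~(x & -x);		/* e.g. 0x0c -> 0x08 */
}
#endif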
12644 /* Emit code to push or pop registers to or from the stack. F is the
12645 assembly file. MASK is the registers to push or pop. PUSH is
12646 nonzero if we should push, and zero if we should pop. For debugging
12647 output, if pushing, adjust CFA_OFFSET by the amount of space added
12648 to the stack. REAL_REGS should have the same number of bits set as
12649 MASK, and will be used instead (in the same order) to describe which
12650 registers were saved - this is used to mark the save slots when we
12651 push high registers after moving them to low registers. */
12652 static void
12653 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12655 int regno;
12656 int lo_mask = mask & 0xFF;
12657 int pushed_words = 0;
12659 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12661 /* Special case. Do not generate a POP PC statement here; do it in
12662 thumb_exit (). */
12663 thumb_exit (f, -1);
12664 return;
12667 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12669 /* Look at the low registers first. */
12670 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12672 if (lo_mask & 1)
12674 asm_fprintf (f, "%r", regno);
12676 if ((lo_mask & ~1) != 0)
12677 fprintf (f, ", ");
12679 pushed_words++;
12683 if (push && (mask & (1 << LR_REGNUM)))
12685 /* Catch pushing the LR. */
12686 if (mask & 0xFF)
12687 fprintf (f, ", ");
12689 asm_fprintf (f, "%r", LR_REGNUM);
12691 pushed_words++;
12693 else if (!push && (mask & (1 << PC_REGNUM)))
12695 /* Catch popping the PC. */
12696 if (TARGET_INTERWORK || TARGET_BACKTRACE
12697 || current_function_calls_eh_return)
12699 /* The PC is never popped directly; instead
12700 it is popped into r3 and then BX is used. */
12701 fprintf (f, "}\n");
12703 thumb_exit (f, -1);
12705 return;
12707 else
12709 if (mask & 0xFF)
12710 fprintf (f, ", ");
12712 asm_fprintf (f, "%r", PC_REGNUM);
12716 fprintf (f, "}\n");
12718 if (push && pushed_words && dwarf2out_do_frame ())
12720 char *l = dwarf2out_cfi_label ();
12721 int pushed_mask = real_regs;
12723 *cfa_offset += pushed_words * 4;
12724 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12726 pushed_words = 0;
12727 pushed_mask = real_regs;
12728 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12730 if (pushed_mask & 1)
12731 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
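/* For illustration only (not part of the build): MASK is a bitmap
   indexed by register number, so pushing r4-r7 plus lr corresponds to
   the mask below, which this routine would print as
   "push {r4, r5, r6, r7, lr}".  */
#if 0
static int
example_push_mask (void)
{
  return 0xf0 | (1 << 14);	/* LR_REGNUM is 14 on this target.  */
}
#endif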
12736 void
12737 thumb_final_prescan_insn (rtx insn)
12739 if (flag_print_asm_name)
12740 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12741 INSN_ADDRESSES (INSN_UID (insn)));
12744 int
12745 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12747 unsigned HOST_WIDE_INT mask = 0xff;
12748 int i;
12750 if (val == 0) /* XXX */
12751 return 0;
12753 for (i = 0; i < 25; i++)
12754 if ((val & (mask << i)) == val)
12755 return 1;
12757 return 0;
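/* For illustration only (not part of the build): the test above accepts
   any constant whose set bits fit in one 8-bit window shifted left by
   0-24 places, and rejects anything wider.  */
#if 0
static int
example_shiftable (void)
{
  return thumb_shiftable_const (0x0000ff00)	/* 0xff << 8: yes.  */
	 && thumb_shiftable_const (0xff000000)	/* 0xff << 24: yes.  */
	 && !thumb_shiftable_const (0x00010001);	/* Bits 0 and 16: no.  */
}
#endif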
12760 /* Returns nonzero if the current function contains,
12761 or might contain a far jump. */
12762 static int
12763 thumb_far_jump_used_p (void)
12765 rtx insn;
12767 /* This test is only important for leaf functions. */
12768 /* assert (!leaf_function_p ()); */
12770 /* If we have already decided that far jumps may be used,
12771 do not bother checking again, and always return true even if
12772 it turns out that they are not being used. Once we have made
12773 the decision that far jumps are present (and that hence the link
12774 register will be pushed onto the stack) we cannot go back on it. */
12775 if (cfun->machine->far_jump_used)
12776 return 1;
12778 /* If this function is not being called from the prologue/epilogue
12779 generation code then it must be being called from the
12780 INITIAL_ELIMINATION_OFFSET macro. */
12781 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12783 /* In this case we know that we are being asked about the elimination
12784 of the arg pointer register. If that register is not being used,
12785 then there are no arguments on the stack, and we do not have to
12786 worry that a far jump might force the prologue to push the link
12787 register, changing the stack offsets. In this case we can just
12788 return false, since the presence of far jumps in the function will
12789 not affect stack offsets.
12791 If the arg pointer is live (or if it was live, but has now been
12792 eliminated and so set to dead) then we do have to test to see if
12793 the function might contain a far jump. This test can lead to some
12794 false negatives, since before reload is completed the length of
12795 branch instructions is not known, so gcc defaults to returning their
12796 longest length, which in turn sets the far jump attribute to true.
12798 A false negative will not result in bad code being generated, but it
12799 will result in a needless push and pop of the link register. We
12800 hope that this does not occur too often.
12802 If we need doubleword stack alignment this could affect the other
12803 elimination offsets so we can't risk getting it wrong. */
12804 if (regs_ever_live [ARG_POINTER_REGNUM])
12805 cfun->machine->arg_pointer_live = 1;
12806 else if (!cfun->machine->arg_pointer_live)
12807 return 0;
12810 /* Check to see if the function contains a branch
12811 insn with the far jump attribute set. */
12812 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12814 if (GET_CODE (insn) == JUMP_INSN
12815 /* Ignore tablejump patterns. */
12816 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12817 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12818 && get_attr_far_jump (insn) == FAR_JUMP_YES
12821 /* Record the fact that we have decided that
12822 the function does use far jumps. */
12823 cfun->machine->far_jump_used = 1;
12824 return 1;
12828 return 0;
12831 /* Return nonzero if FUNC must be entered in ARM mode. */
12832 int
12833 is_called_in_ARM_mode (tree func)
12835 if (TREE_CODE (func) != FUNCTION_DECL)
12836 abort ();
12838 /* Ignore the problem about functions whose address is taken. */
12839 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12840 return TRUE;
12842 #ifdef ARM_PE
12843 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12844 #else
12845 return FALSE;
12846 #endif
12849 /* The bits which aren't usefully expanded as rtl. */
12850 const char *
12851 thumb_unexpanded_epilogue (void)
12853 int regno;
12854 int live_regs_mask = 0;
12855 int high_regs_pushed = 0;
12856 int had_to_push_lr;
12857 int size;
12858 int mode;
12860 if (return_used_this_function)
12861 return "";
12863 if (IS_NAKED (arm_current_func_type ()))
12864 return "";
12866 live_regs_mask = thumb_compute_save_reg_mask ();
12867 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12869 /* We can deduce the registers used from the function's return value.
12870 This is more reliable than examining regs_ever_live[] because that
12871 will be set if the register is ever used in the function, not just if
12872 the register is used to hold a return value. */
12874 if (current_function_return_rtx != 0)
12875 mode = GET_MODE (current_function_return_rtx);
12876 else
12877 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12879 size = GET_MODE_SIZE (mode);
12881 /* The prologue may have pushed some high registers to use as
12882 work registers; e.g. the testsuite file:
12883 gcc/testsuite/gcc.c-torture/execute/complex-2.c
12884 compiles to produce:
12885 push {r4, r5, r6, r7, lr}
12886 mov r7, r9
12887 mov r6, r8
12888 push {r6, r7}
12889 as part of the prologue. We have to undo that pushing here. */
12891 if (high_regs_pushed)
12893 int mask = live_regs_mask & 0xff;
12894 int next_hi_reg;
12896 /* The available low registers depend on the size of the value we are
12897 returning. */
12898 if (size <= 12)
12899 mask |= 1 << 3;
12900 if (size <= 8)
12901 mask |= 1 << 2;
12903 if (mask == 0)
12904 /* Oh dear! We have no low registers into which we can pop
12905 high registers! */
12906 internal_error
12907 ("no low registers available for popping high registers");
12909 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12910 if (live_regs_mask & (1 << next_hi_reg))
12911 break;
12913 while (high_regs_pushed)
12915 /* Find lo register(s) into which the high register(s) can
12916 be popped. */
12917 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12919 if (mask & (1 << regno))
12920 high_regs_pushed--;
12921 if (high_regs_pushed == 0)
12922 break;
12925 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12927 /* Pop the values into the low register(s). */
12928 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12930 /* Move the value(s) into the high registers. */
12931 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12933 if (mask & (1 << regno))
12935 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12936 regno);
12938 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12939 if (live_regs_mask & (1 << next_hi_reg))
12940 break;
12944 live_regs_mask &= ~0x0f00;
12947 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12948 live_regs_mask &= 0xff;
12950 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12952 /* Pop the return address into the PC. */
12953 if (had_to_push_lr)
12954 live_regs_mask |= 1 << PC_REGNUM;
12956 /* Either no argument registers were pushed or a backtrace
12957 structure was created which includes an adjusted stack
12958 pointer, so just pop everything. */
12959 if (live_regs_mask)
12960 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12961 live_regs_mask);
12963 /* We have either just popped the return address into the
12964 PC or it was kept in LR for the entire function. */
12965 if (!had_to_push_lr)
12966 thumb_exit (asm_out_file, LR_REGNUM);
12968 else
12970 /* Pop everything but the return address. */
12971 if (live_regs_mask)
12972 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12973 live_regs_mask);
12975 if (had_to_push_lr)
12977 if (size > 12)
12979 /* We have no free low regs, so save one. */
12980 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12981 LAST_ARG_REGNUM);
12984 /* Get the return address into a temporary register. */
12985 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12986 1 << LAST_ARG_REGNUM);
12988 if (size > 12)
12990 /* Move the return address to lr. */
12991 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12992 LAST_ARG_REGNUM);
12993 /* Restore the low register. */
12994 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12995 IP_REGNUM);
12996 regno = LR_REGNUM;
12998 else
12999 regno = LAST_ARG_REGNUM;
13001 else
13002 regno = LR_REGNUM;
13004 /* Remove the argument registers that were pushed onto the stack. */
13005 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13006 SP_REGNUM, SP_REGNUM,
13007 current_function_pretend_args_size);
13009 thumb_exit (asm_out_file, regno);
13012 return "";
13015 /* Functions to save and restore machine-specific function data. */
13016 static struct machine_function *
13017 arm_init_machine_status (void)
13019 struct machine_function *machine;
13020 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13022 #if ARM_FT_UNKNOWN != 0
13023 machine->func_type = ARM_FT_UNKNOWN;
13024 #endif
13025 return machine;
13028 /* Return an RTX indicating where the return address to the
13029 calling function can be found. */
13030 rtx
13031 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13033 if (count != 0)
13034 return NULL_RTX;
13036 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
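/* For illustration only (not part of the build): this hook is what backs
   __builtin_return_address on this target; only a count of zero is
   supported, so deeper frames yield a null pointer.  */
#if 0
static void *
example_return_address (void)
{
  return __builtin_return_address (0);
}
#endif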
13039 /* Do anything needed before RTL is emitted for each function. */
13040 void
13041 arm_init_expanders (void)
13043 /* Arrange to initialize and mark the machine per-function status. */
13044 init_machine_status = arm_init_machine_status;
13046 /* This is to stop the combine pass optimizing away the alignment
13047 adjustment of va_arg. */
13048 /* ??? It is claimed that this should not be necessary. */
13049 if (cfun)
13050 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13054 /* Like arm_compute_initial_elimination_offset. Simpler because
13055 THUMB_HARD_FRAME_POINTER isn't actually the ABI-specified frame pointer. */
13057 HOST_WIDE_INT
13058 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13060 arm_stack_offsets *offsets;
13062 offsets = arm_get_frame_offsets ();
13064 switch (from)
13066 case ARG_POINTER_REGNUM:
13067 switch (to)
13069 case STACK_POINTER_REGNUM:
13070 return offsets->outgoing_args - offsets->saved_args;
13072 case FRAME_POINTER_REGNUM:
13073 return offsets->soft_frame - offsets->saved_args;
13075 case THUMB_HARD_FRAME_POINTER_REGNUM:
13076 case ARM_HARD_FRAME_POINTER_REGNUM:
13077 return offsets->saved_regs - offsets->saved_args;
13079 default:
13080 abort ();
13082 break;
13084 case FRAME_POINTER_REGNUM:
13085 switch (to)
13087 case STACK_POINTER_REGNUM:
13088 return offsets->outgoing_args - offsets->soft_frame;
13090 case THUMB_HARD_FRAME_POINTER_REGNUM:
13091 case ARM_HARD_FRAME_POINTER_REGNUM:
13092 return offsets->saved_regs - offsets->soft_frame;
13094 default:
13095 abort ();
13097 break;
13099 default:
13100 abort ();
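/* For illustration only (not part of the build): the offsets returned
   above are consistent by construction, since
   (outgoing_args - saved_args) == (soft_frame - saved_args)
   + (outgoing_args - soft_frame).  A sketch of that identity:  */
#if 0
static void
example_check_elimination_identity (void)
{
  HOST_WIDE_INT a2s = thumb_compute_initial_elimination_offset
    (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
  HOST_WIDE_INT a2f = thumb_compute_initial_elimination_offset
    (ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM);
  HOST_WIDE_INT f2s = thumb_compute_initial_elimination_offset
    (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM);
  if (a2s != a2f + f2s)
    abort ();
}
#endif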
13105 /* Generate the rest of a function's prologue. */
13106 void
13107 thumb_expand_prologue (void)
13109 rtx insn, dwarf;
13111 HOST_WIDE_INT amount;
13112 arm_stack_offsets *offsets;
13113 unsigned long func_type;
13114 int regno;
13115 unsigned long live_regs_mask;
13117 func_type = arm_current_func_type ();
13119 /* Naked functions don't have prologues. */
13120 if (IS_NAKED (func_type))
13121 return;
13123 if (IS_INTERRUPT (func_type))
13125 error ("interrupt Service Routines cannot be coded in Thumb mode");
13126 return;
13129 live_regs_mask = thumb_compute_save_reg_mask ();
13130 /* Load the pic register before setting the frame pointer, so we can use r7
13131 as a temporary work register. */
13132 if (flag_pic)
13133 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13135 offsets = arm_get_frame_offsets ();
13137 if (frame_pointer_needed)
13139 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13140 stack_pointer_rtx));
13141 RTX_FRAME_RELATED_P (insn) = 1;
13143 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13144 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13145 stack_pointer_rtx);
13147 amount = offsets->outgoing_args - offsets->saved_regs;
13148 if (amount)
13150 if (amount < 512)
13152 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13153 GEN_INT (- amount)));
13154 RTX_FRAME_RELATED_P (insn) = 1;
13156 else
13158 rtx reg;
13160 /* The stack decrement is too big for an immediate value in a single
13161 insn. In theory we could issue multiple subtracts, but after
13162 three of them it becomes more space efficient to place the full
13163 value in the constant pool and load into a register. (Also the
13164 ARM debugger really likes to see only one stack decrement per
13165 function). So instead we look for a scratch register into which
13166 we can load the decrement, and then we subtract this from the
13167 stack pointer. Unfortunately on the thumb the only available
13168 scratch registers are the argument registers, and we cannot use
13169 these as they may hold arguments to the function. Instead we
13170 attempt to locate a call preserved register which is used by this
13171 function. If we can find one, then we know that it will have
13172 been pushed at the start of the prologue and so we can corrupt
13173 it now. */
13174 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13175 if (live_regs_mask & (1 << regno)
13176 && !(frame_pointer_needed
13177 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13178 break;
13180 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13182 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13184 /* Choose an arbitrary, non-argument low register. */
13185 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13187 /* Save it by copying it into a high, scratch register. */
13188 emit_insn (gen_movsi (spare, reg));
13189 /* Add a USE to stop propagate_one_insn() from barfing. */
13190 emit_insn (gen_prologue_use (spare));
13192 /* Decrement the stack. */
13193 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13194 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13195 stack_pointer_rtx, reg));
13196 RTX_FRAME_RELATED_P (insn) = 1;
13197 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13198 plus_constant (stack_pointer_rtx,
13199 -amount));
13200 RTX_FRAME_RELATED_P (dwarf) = 1;
13201 REG_NOTES (insn)
13202 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13203 REG_NOTES (insn));
13205 /* Restore the low register's original value. */
13206 emit_insn (gen_movsi (reg, spare));
13208 /* Emit a USE of the restored scratch register, so that flow
13209 analysis will not consider the restore redundant. The
13210 register won't be used again in this function and isn't
13211 restored by the epilogue. */
13212 emit_insn (gen_prologue_use (reg));
13214 else
13216 reg = gen_rtx_REG (SImode, regno);
13218 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13220 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13221 stack_pointer_rtx, reg));
13222 RTX_FRAME_RELATED_P (insn) = 1;
13223 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13224 plus_constant (stack_pointer_rtx,
13225 -amount));
13226 RTX_FRAME_RELATED_P (dwarf) = 1;
13227 REG_NOTES (insn)
13228 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13229 REG_NOTES (insn));
13232 /* If the frame pointer is needed, emit a special barrier that
13233 will prevent the scheduler from moving stores to the frame
13234 before the stack adjustment. */
13235 if (frame_pointer_needed)
13236 emit_insn (gen_stack_tie (stack_pointer_rtx,
13237 hard_frame_pointer_rtx));
13240 if (current_function_profile || TARGET_NO_SCHED_PRO)
13241 emit_insn (gen_blockage ());
13243 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13244 if (live_regs_mask & 0xff)
13245 cfun->machine->lr_save_eliminated = 0;
13247 /* If the link register is being kept alive, with the return address in it,
13248 then make sure that it does not get reused by the ce2 pass. */
13249 if (cfun->machine->lr_save_eliminated)
13250 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
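/* For illustration only (not part of the build): the < 512 test above
   reflects the Thumb "sub sp, #imm" encoding, which holds a 7-bit word
   count and so reaches at most 508 bytes; larger word-aligned
   adjustments have to go through a scratch register as done above.  */
#if 0
static int
example_fits_single_sp_adjust (HOST_WIDE_INT amount)
{
  return amount > 0 && amount < 512 && (amount & 3) == 0;
}
#endif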
13254 void
13255 thumb_expand_epilogue (void)
13257 HOST_WIDE_INT amount;
13258 arm_stack_offsets *offsets;
13259 int regno;
13261 /* Naked functions don't have epilogues either. */
13262 if (IS_NAKED (arm_current_func_type ()))
13263 return;
13265 offsets = arm_get_frame_offsets ();
13266 amount = offsets->outgoing_args - offsets->saved_regs;
13268 if (frame_pointer_needed)
13269 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13270 else if (amount)
13272 if (amount < 512)
13273 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13274 GEN_INT (amount)));
13275 else
13277 /* r3 is always free in the epilogue. */
13278 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13280 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13281 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13285 /* Emit a USE (stack_pointer_rtx), so that
13286 the stack adjustment will not be deleted. */
13287 emit_insn (gen_prologue_use (stack_pointer_rtx));
13289 if (current_function_profile || TARGET_NO_SCHED_PRO)
13290 emit_insn (gen_blockage ());
13292 /* Emit a clobber for each insn that will be restored in the epilogue,
13293 so that flow2 will get register lifetimes correct. */
13294 for (regno = 0; regno < 13; regno++)
13295 if (regs_ever_live[regno] && !call_used_regs[regno])
13296 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13298 if (! regs_ever_live[LR_REGNUM])
13299 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13302 static void
13303 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13305 int live_regs_mask = 0;
13306 int l_mask;
13307 int high_regs_pushed = 0;
13308 int cfa_offset = 0;
13309 int regno;
13311 if (IS_NAKED (arm_current_func_type ()))
13312 return;
13314 if (is_called_in_ARM_mode (current_function_decl))
13316 const char * name;
13318 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13319 abort ();
13320 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13321 abort ();
13322 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13324 /* Generate code sequence to switch us into Thumb mode. */
13325 /* The .code 32 directive has already been emitted by
13326 ASM_DECLARE_FUNCTION_NAME. */
13327 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13328 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13330 /* Generate a label, so that the debugger will notice the
13331 change in instruction sets. This label is also used by
13332 the assembler to bypass the ARM code when this function
13333 is called from a Thumb encoded function elsewhere in the
13334 same file. Hence the definition of STUB_NAME here must
13335 agree with the definition in gas/config/tc-arm.c. */
13337 #define STUB_NAME ".real_start_of"
13339 fprintf (f, "\t.code\t16\n");
13340 #ifdef ARM_PE
13341 if (arm_dllexport_name_p (name))
13342 name = arm_strip_name_encoding (name);
13343 #endif
13344 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13345 fprintf (f, "\t.thumb_func\n");
13346 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13349 if (current_function_pretend_args_size)
13351 if (cfun->machine->uses_anonymous_args)
13353 int num_pushes;
13355 fprintf (f, "\tpush\t{");
13357 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13359 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13360 regno <= LAST_ARG_REGNUM;
13361 regno++)
13362 asm_fprintf (f, "%r%s", regno,
13363 regno == LAST_ARG_REGNUM ? "" : ", ");
13365 fprintf (f, "}\n");
13367 else
13368 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13369 SP_REGNUM, SP_REGNUM,
13370 current_function_pretend_args_size);
13372 /* We don't need to record the stores for unwinding (would it
13373 help the debugger any if we did?), but record the change in
13374 the stack pointer. */
13375 if (dwarf2out_do_frame ())
13377 char *l = dwarf2out_cfi_label ();
13378 cfa_offset = cfa_offset + current_function_pretend_args_size;
13379 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13383 live_regs_mask = thumb_compute_save_reg_mask ();
13384 /* Just low regs and lr. */
13385 l_mask = live_regs_mask & 0x40ff;
13387 if (TARGET_BACKTRACE)
13389 int offset;
13390 int work_register;
13392 /* We have been asked to create a stack backtrace structure.
13393 The code looks like this:
13395 0 .align 2
13396 0 func:
13397 0 sub SP, #16 Reserve space for 4 registers.
13398 2 push {R7} Push low registers.
13399 4 add R7, SP, #20 Get the stack pointer before the push.
13400 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13401 8 mov R7, PC Get hold of the start of this code plus 12.
13402 10 str R7, [SP, #16] Store it.
13403 12 mov R7, FP Get hold of the current frame pointer.
13404 14 str R7, [SP, #4] Store it.
13405 16 mov R7, LR Get hold of the current return address.
13406 18 str R7, [SP, #12] Store it.
13407 20 add R7, SP, #16 Point at the start of the backtrace structure.
13408 22 mov FP, R7 Put this value into the frame pointer. */
13410 work_register = thumb_find_work_register (live_regs_mask);
13412 asm_fprintf
13413 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13414 SP_REGNUM, SP_REGNUM);
13416 if (dwarf2out_do_frame ())
13418 char *l = dwarf2out_cfi_label ();
13419 cfa_offset = cfa_offset + 16;
13420 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13423 if (l_mask)
13425 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13426 offset = bit_count (l_mask);
13428 else
13429 offset = 0;
13431 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13432 offset + 16 + current_function_pretend_args_size);
13434 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13435 offset + 4);
13437 /* Make sure that the instruction fetching the PC is in the right place
13438 to calculate "start of backtrace creation code + 12". */
13439 if (l_mask)
13441 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13442 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13443 offset + 12);
13444 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13445 ARM_HARD_FRAME_POINTER_REGNUM);
13446 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13447 offset);
13449 else
13451 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13452 ARM_HARD_FRAME_POINTER_REGNUM);
13453 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13454 offset);
13455 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13456 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13457 offset + 12);
13460 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13461 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13462 offset + 8);
13463 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13464 offset + 12);
13465 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13466 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13468 else if (l_mask)
13469 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13471 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13473 if (high_regs_pushed)
13475 int pushable_regs = 0;
13476 int next_hi_reg;
13478 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13479 if (live_regs_mask & (1 << next_hi_reg))
13480 break;
13482 pushable_regs = l_mask & 0xff;
13484 if (pushable_regs == 0)
13485 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13487 while (high_regs_pushed > 0)
13489 int real_regs_mask = 0;
13491 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13493 if (pushable_regs & (1 << regno))
13495 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13497 high_regs_pushed--;
13498 real_regs_mask |= (1 << next_hi_reg);
13500 if (high_regs_pushed)
13502 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13503 next_hi_reg--)
13504 if (live_regs_mask & (1 << next_hi_reg))
13505 break;
13507 else
13509 pushable_regs &= ~((1 << regno) - 1);
13510 break;
13515 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13520 /* Handle the case of a double word load into a low register from
13521 a computed memory address. The computed address may involve a
13522 register which is overwritten by the load. */
13523 const char *
13524 thumb_load_double_from_address (rtx *operands)
13526 rtx addr;
13527 rtx base;
13528 rtx offset;
13529 rtx arg1;
13530 rtx arg2;
13532 if (GET_CODE (operands[0]) != REG)
13533 abort ();
13535 if (GET_CODE (operands[1]) != MEM)
13536 abort ();
13538 /* Get the memory address. */
13539 addr = XEXP (operands[1], 0);
13541 /* Work out how the memory address is computed. */
13542 switch (GET_CODE (addr))
13544 case REG:
13545 operands[2] = gen_rtx_MEM (SImode,
13546 plus_constant (XEXP (operands[1], 0), 4));
13548 if (REGNO (operands[0]) == REGNO (addr))
13550 output_asm_insn ("ldr\t%H0, %2", operands);
13551 output_asm_insn ("ldr\t%0, %1", operands);
13553 else
13555 output_asm_insn ("ldr\t%0, %1", operands);
13556 output_asm_insn ("ldr\t%H0, %2", operands);
13558 break;
13560 case CONST:
13561 /* Compute <address> + 4 for the high order load. */
13562 operands[2] = gen_rtx_MEM (SImode,
13563 plus_constant (XEXP (operands[1], 0), 4));
13565 output_asm_insn ("ldr\t%0, %1", operands);
13566 output_asm_insn ("ldr\t%H0, %2", operands);
13567 break;
13569 case PLUS:
13570 arg1 = XEXP (addr, 0);
13571 arg2 = XEXP (addr, 1);
13573 if (CONSTANT_P (arg1))
13574 base = arg2, offset = arg1;
13575 else
13576 base = arg1, offset = arg2;
13578 if (GET_CODE (base) != REG)
13579 abort ();
13581 /* Catch the case of <address> = <reg> + <reg> */
13582 if (GET_CODE (offset) == REG)
13584 int reg_offset = REGNO (offset);
13585 int reg_base = REGNO (base);
13586 int reg_dest = REGNO (operands[0]);
13588 /* Add the base and offset registers together into the
13589 higher destination register. */
13590 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13591 reg_dest + 1, reg_base, reg_offset);
13593 /* Load the lower destination register from the address in
13594 the higher destination register. */
13595 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13596 reg_dest, reg_dest + 1);
13598 /* Load the higher destination register from its own address
13599 plus 4. */
13600 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13601 reg_dest + 1, reg_dest + 1);
13603 else
13605 /* Compute <address> + 4 for the high order load. */
13606 operands[2] = gen_rtx_MEM (SImode,
13607 plus_constant (XEXP (operands[1], 0), 4));
13609 /* If the computed address is held in the low order register
13610 then load the high order register first, otherwise always
13611 load the low order register first. */
13612 if (REGNO (operands[0]) == REGNO (base))
13614 output_asm_insn ("ldr\t%H0, %2", operands);
13615 output_asm_insn ("ldr\t%0, %1", operands);
13617 else
13619 output_asm_insn ("ldr\t%0, %1", operands);
13620 output_asm_insn ("ldr\t%H0, %2", operands);
13623 break;
13625 case LABEL_REF:
13626 /* With no registers to worry about we can just load the value
13627 directly. */
13628 operands[2] = gen_rtx_MEM (SImode,
13629 plus_constant (XEXP (operands[1], 0), 4));
13631 output_asm_insn ("ldr\t%H0, %2", operands);
13632 output_asm_insn ("ldr\t%0, %1", operands);
13633 break;
13635 default:
13636 abort ();
13637 break;
13640 return "";
13643 const char *
13644 thumb_output_move_mem_multiple (int n, rtx *operands)
13646 rtx tmp;
13648 switch (n)
13650 case 2:
13651 if (REGNO (operands[4]) > REGNO (operands[5]))
13653 tmp = operands[4];
13654 operands[4] = operands[5];
13655 operands[5] = tmp;
13657 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13658 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13659 break;
13661 case 3:
13662 if (REGNO (operands[4]) > REGNO (operands[5]))
13664 tmp = operands[4];
13665 operands[4] = operands[5];
13666 operands[5] = tmp;
13668 if (REGNO (operands[5]) > REGNO (operands[6]))
13670 tmp = operands[5];
13671 operands[5] = operands[6];
13672 operands[6] = tmp;
13674 if (REGNO (operands[4]) > REGNO (operands[5]))
13676 tmp = operands[4];
13677 operands[4] = operands[5];
13678 operands[5] = tmp;
13681 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13682 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13683 break;
13685 default:
13686 abort ();
13689 return "";
/* Output a call-via instruction for thumb state.  */
const char *
thumb_call_via_reg (rtx reg)
{
  int regno = REGNO (reg);
  rtx *labelp;

  gcc_assert (regno < SP_REGNUM);

  /* If we are in the normal text section we can use a single instance
     per compilation unit.  If we are doing function sections, then we need
     an entry per section, since we can't rely on reachability.  */
  if (in_text_section ())
    {
      thumb_call_reg_needed = 1;

      if (thumb_call_via_label[regno] == NULL)
	thumb_call_via_label[regno] = gen_label_rtx ();
      labelp = thumb_call_via_label + regno;
    }
  else
    {
      if (cfun->machine->call_via[regno] == NULL)
	cfun->machine->call_via[regno] = gen_label_rtx ();
      labelp = cfun->machine->call_via + regno;
    }

  output_asm_insn ("bl\t%a0", labelp);
  return "";
}
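
/* A rough sketch of the resulting code, with r4 chosen arbitrarily as
   the call register: each call through r4 becomes

	bl	.L<n>		@ emitted here

   and arm_file_end (or a per-function-section copy) later provides the
   shared trampoline

   .L<n>:
	bx	r4

   On Thumb cores without a register-form BLX, the BL sets LR to the
   return address and the BX performs the indirect jump.  */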
/* Routines for generating rtl.  */
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in  = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;

  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
			    reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
			    reg));
    }
}
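
/* Worked example: a 27-byte copy is expanded as two 12-byte block moves
   (which post-increment the pointers), then a halfword at offset 0 and
   a byte at offset 2 from the updated pointers: 27 = 12 + 12 + 2 + 1.  */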
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}

/* Return the length of a function name prefix
   that starts with the character C.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
      ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}

/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
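
/* For instance, a name encoded as "*foo" (assuming '*' is among the
   strippable prefixes in ARM_NAME_ENCODING_LENGTHS, as the '*' test in
   the loop implies) is printed verbatim as "foo"; an unadorned "bar"
   instead goes through asm_fprintf's %U, which prepends the user label
   prefix (typically an underscore) when the target uses one.  */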
static void
arm_file_end (void)
{
  int regno;

  if (! thumb_call_reg_needed)
    return;

  text_section ();
  asm_fprintf (asm_out_file, "\t.code 16\n");
  ASM_OUTPUT_ALIGN (asm_out_file, 1);

  for (regno = 0; regno < SP_REGNUM; regno++)
    {
      rtx label = thumb_call_via_label[regno];

      if (label != 0)
	{
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (label));
	  asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
	}
    }
}

rtx aof_pic_label;
#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
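
/* Sketch of the behaviour: each distinct symbol gets a 4-byte slot in
   the x$adcons pool, so the first symbol yields aof_pic_label + 0, the
   second aof_pic_label + 4, and so on; a repeated symbol returns its
   existing offset instead of growing the chain.  */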
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}

int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}
int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}

static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  arm_file_end ();
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);
  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
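
/* Example of the delta-splitting loop above: delta = 0x1001 cannot be a
   single ARM add immediate (immediates are an 8-bit value rotated by an
   even amount), so with "this" in r0 it is emitted as two adds:

	add	r0, r0, #1
	add	r0, r0, #4096

   Each iteration peels off one even-aligned 8-bit chunk of the delta.  */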
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
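
/* For example, a V4HImode CONST_VECTOR holding {1, 2, 3, 4} is printed
   as "0x0004000300020001": elements are emitted from the highest index
   down, each zero-padded to its element width, so element 0 lands in
   the least significant halfword.  */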
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which the address used to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
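
/* For instance, a varargs function whose named arguments consume two of
   the four argument registers (r0-r3) gets *pretend_size = 2 * 4 = 8;
   the prologue then pushes r2 and r3 so that the anonymous arguments
   form one contiguous block with those already on the stack.  */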
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
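
/* Illustration: if PRODUCER is "r1 = r2 + r3" and CONSUMER is the store
   "[r1] = r4", the store's address mentions r1, so this returns 0
   (there is an early address dependency).  For "[r5] = r1" it returns
   nonzero: only the stored value, not the address, waits on r1.  This
   predicate and the ones below feed the bypass conditions in the
   pipeline descriptions.  */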
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}

/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}

/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}

/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}

/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}

/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}

/* The EABI says that the virtual table, etc., for a class must be
   exported if it has a key method.  The EABI does not specify the
   behavior if there is no key method, but there is no harm in
   exporting the class data in that case too.  */

static bool
arm_cxx_export_class_data (void)
{
  return TARGET_AAPCS_BASED;
}
void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}
      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
	  || delta >= 1024)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
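
/* In effect this tells the middle end that, for SImode only, "x << n"
   with n in a register may be treated as "x << (n & 255)"; e.g. a
   variable shift by 256 behaves like a shift by 0 on ARM data
   processing instructions, which read the count from the bottom byte
   of the register.  No such promise is made for DImode, where the
   library and expanded sequences diverge.  */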