* PR target/19162
[official-gcc.git] / gcc / config / arm / arm.c
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static unsigned long thumb_compute_save_reg_mask (void);
75 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
76 static rtx emit_multi_reg_push (int);
77 static rtx emit_sfm (int, int);
78 #ifndef AOF_ASSEMBLER
79 static bool arm_assemble_integer (rtx, unsigned int, int);
80 #endif
81 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
82 static arm_cc get_arm_condition_code (rtx);
83 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
84 static rtx is_jump_table (rtx);
85 static const char *output_multi_immediate (rtx *, const char *, const char *,
86 int, HOST_WIDE_INT);
87 static void print_multi_reg (FILE *, const char *, int, int);
88 static const char *shift_op (rtx, HOST_WIDE_INT *);
89 static struct machine_function *arm_init_machine_status (void);
90 static int number_of_first_bit_set (int);
91 static void replace_symbols_in_block (tree, rtx, rtx);
92 static void thumb_exit (FILE *, int);
93 static void thumb_pushpop (FILE *, int, int, int *, int);
94 static rtx is_jump_table (rtx);
95 static HOST_WIDE_INT get_jump_table_size (rtx);
96 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
97 static Mnode *add_minipool_forward_ref (Mfix *);
98 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
99 static Mnode *add_minipool_backward_ref (Mfix *);
100 static void assign_minipool_offsets (Mfix *);
101 static void arm_print_value (FILE *, rtx);
102 static void dump_minipool (rtx);
103 static int arm_barrier_cost (rtx);
104 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
105 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
106 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
107 rtx);
108 static void arm_reorg (void);
109 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
110 static int current_file_function_operand (rtx);
111 static unsigned long arm_compute_save_reg0_reg12_mask (void);
112 static unsigned long arm_compute_save_reg_mask (void);
113 static unsigned long arm_isr_value (tree);
114 static unsigned long arm_compute_func_type (void);
115 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
116 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
117 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
118 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
119 #endif
120 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
121 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
122 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
123 static int arm_comp_type_attributes (tree, tree);
124 static void arm_set_default_type_attributes (tree);
125 static int arm_adjust_cost (rtx, rtx, rtx, int);
126 static int count_insns_for_constant (HOST_WIDE_INT, int);
127 static int arm_get_strip_length (int);
128 static bool arm_function_ok_for_sibcall (tree, tree);
129 static void arm_internal_label (FILE *, const char *, unsigned long);
130 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
131 tree);
132 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
133 static bool arm_size_rtx_costs (rtx, int, int, int *);
134 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
135 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
136 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
137 static bool arm_9e_rtx_costs (rtx, int, int, int *);
138 static int arm_address_cost (rtx);
139 static bool arm_memory_load_p (rtx);
140 static bool arm_cirrus_insn_p (rtx);
141 static void cirrus_reorg (rtx);
142 static void arm_init_builtins (void);
143 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
144 static void arm_init_iwmmxt_builtins (void);
145 static rtx safe_vector_operand (rtx, enum machine_mode);
146 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
147 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
148 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
149 static void emit_constant_insn (rtx cond, rtx pattern);
150 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
151 tree, bool);
153 #ifndef ARM_PE
154 static void arm_encode_section_info (tree, rtx, int);
155 #endif
157 static void arm_file_end (void);
159 #ifdef AOF_ASSEMBLER
160 static void aof_globalize_label (FILE *, const char *);
161 static void aof_dump_imports (FILE *);
162 static void aof_dump_pic_table (FILE *);
163 static void aof_file_start (void);
164 static void aof_file_end (void);
165 #endif
166 static rtx arm_struct_value_rtx (tree, int);
167 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
168 tree, int *, int);
169 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
170 enum machine_mode, tree, bool);
171 static bool arm_promote_prototypes (tree);
172 static bool arm_default_short_enums (void);
173 static bool arm_align_anon_bitfield (void);
175 static tree arm_cxx_guard_type (void);
176 static bool arm_cxx_guard_mask_bit (void);
177 static tree arm_get_cookie_size (tree);
178 static bool arm_cookie_has_size (void);
179 static bool arm_cxx_cdtor_returns_this (void);
180 static bool arm_cxx_key_method_may_be_inline (void);
181 static bool arm_cxx_export_class_data (void);
182 static void arm_init_libfuncs (void);
183 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
185 /* Initialize the GCC target structure. */
186 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
187 #undef TARGET_MERGE_DECL_ATTRIBUTES
188 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
189 #endif
191 #undef TARGET_ATTRIBUTE_TABLE
192 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
194 #undef TARGET_ASM_FILE_END
195 #define TARGET_ASM_FILE_END arm_file_end
197 #ifdef AOF_ASSEMBLER
198 #undef TARGET_ASM_BYTE_OP
199 #define TARGET_ASM_BYTE_OP "\tDCB\t"
200 #undef TARGET_ASM_ALIGNED_HI_OP
201 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
202 #undef TARGET_ASM_ALIGNED_SI_OP
203 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
204 #undef TARGET_ASM_GLOBALIZE_LABEL
205 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
206 #undef TARGET_ASM_FILE_START
207 #define TARGET_ASM_FILE_START aof_file_start
208 #undef TARGET_ASM_FILE_END
209 #define TARGET_ASM_FILE_END aof_file_end
210 #else
211 #undef TARGET_ASM_ALIGNED_SI_OP
212 #define TARGET_ASM_ALIGNED_SI_OP NULL
213 #undef TARGET_ASM_INTEGER
214 #define TARGET_ASM_INTEGER arm_assemble_integer
215 #endif
217 #undef TARGET_ASM_FUNCTION_PROLOGUE
218 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
220 #undef TARGET_ASM_FUNCTION_EPILOGUE
221 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
223 #undef TARGET_COMP_TYPE_ATTRIBUTES
224 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
226 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
227 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
229 #undef TARGET_SCHED_ADJUST_COST
230 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
232 #undef TARGET_ENCODE_SECTION_INFO
233 #ifdef ARM_PE
234 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
235 #else
236 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
237 #endif
239 #undef TARGET_STRIP_NAME_ENCODING
240 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
242 #undef TARGET_ASM_INTERNAL_LABEL
243 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
245 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
246 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
248 #undef TARGET_ASM_OUTPUT_MI_THUNK
249 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
250 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
251 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
253 /* This will be overridden in arm_override_options. */
254 #undef TARGET_RTX_COSTS
255 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
256 #undef TARGET_ADDRESS_COST
257 #define TARGET_ADDRESS_COST arm_address_cost
259 #undef TARGET_SHIFT_TRUNCATION_MASK
260 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
261 #undef TARGET_VECTOR_MODE_SUPPORTED_P
262 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
264 #undef TARGET_MACHINE_DEPENDENT_REORG
265 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
267 #undef TARGET_INIT_BUILTINS
268 #define TARGET_INIT_BUILTINS arm_init_builtins
269 #undef TARGET_EXPAND_BUILTIN
270 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
272 #undef TARGET_INIT_LIBFUNCS
273 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
275 #undef TARGET_PROMOTE_FUNCTION_ARGS
276 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
277 #undef TARGET_PROMOTE_FUNCTION_RETURN
278 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
279 #undef TARGET_PROMOTE_PROTOTYPES
280 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
281 #undef TARGET_PASS_BY_REFERENCE
282 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
283 #undef TARGET_ARG_PARTIAL_BYTES
284 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
286 #undef TARGET_STRUCT_VALUE_RTX
287 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
289 #undef TARGET_SETUP_INCOMING_VARARGS
290 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
292 #undef TARGET_DEFAULT_SHORT_ENUMS
293 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
295 #undef TARGET_ALIGN_ANON_BITFIELD
296 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
298 #undef TARGET_CXX_GUARD_TYPE
299 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
301 #undef TARGET_CXX_GUARD_MASK_BIT
302 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
304 #undef TARGET_CXX_GET_COOKIE_SIZE
305 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
307 #undef TARGET_CXX_COOKIE_HAS_SIZE
308 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
310 #undef TARGET_CXX_CDTOR_RETURNS_THIS
311 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
313 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
314 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
316 #undef TARGET_CXX_EXPORT_CLASS_DATA
317 #define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data
319 struct gcc_target targetm = TARGET_INITIALIZER;
321 /* Obstack for minipool constant handling. */
322 static struct obstack minipool_obstack;
323 static char * minipool_startobj;
325 /* The maximum number of insns skipped which
326 will be conditionalised if possible. */
327 static int max_insns_skipped = 5;
329 extern FILE * asm_out_file;
331 /* True if we are currently building a constant table. */
332 int making_const_table;
334 /* Define the information needed to generate branch insns. This is
335 stored from the compare operation. */
336 rtx arm_compare_op0, arm_compare_op1;
338 /* The processor for which instructions should be scheduled. */
339 enum processor_type arm_tune = arm_none;
341 /* Which floating point model to use. */
342 enum arm_fp_model arm_fp_model;
344 /* Which floating point hardware is available. */
345 enum fputype arm_fpu_arch;
347 /* Which floating point hardware to schedule for. */
348 enum fputype arm_fpu_tune;
350 /* Whether to use floating point hardware. */
351 enum float_abi_type arm_float_abi;
353 /* Which ABI to use. */
354 enum arm_abi_type arm_abi;
356 /* Set by the -mfpu=... option. */
357 const char * target_fpu_name = NULL;
359 /* Set by the -mfpe=... option. */
360 const char * target_fpe_name = NULL;
362 /* Set by the -mfloat-abi=... option. */
363 const char * target_float_abi_name = NULL;
365 /* Set by the legacy -mhard-float and -msoft-float options. */
366 const char * target_float_switch = NULL;
368 /* Set by the -mabi=... option. */
369 const char * target_abi_name = NULL;
 371 /* Used to parse the -mstructure-size-boundary command line option.  */
372 const char * structure_size_string = NULL;
373 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
375 /* Used for Thumb call_via trampolines. */
376 rtx thumb_call_via_label[13];
377 static int thumb_call_reg_needed;
379 /* Bit values used to identify processor capabilities. */
380 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
381 #define FL_ARCH3M (1 << 1) /* Extended multiply */
382 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
383 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
384 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
385 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
386 #define FL_THUMB (1 << 6) /* Thumb aware */
387 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
388 #define FL_STRONG (1 << 8) /* StrongARM */
389 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
390 #define FL_XSCALE (1 << 10) /* XScale */
391 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
392 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
393 media instructions. */
394 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
396 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
398 #define FL_FOR_ARCH2 0
399 #define FL_FOR_ARCH3 FL_MODE32
400 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
401 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
402 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
403 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
404 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
405 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
406 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
407 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
408 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
409 #define FL_FOR_ARCH6J FL_FOR_ARCH6
410 #define FL_FOR_ARCH6K FL_FOR_ARCH6
411 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
412 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6
414 /* The bits in this mask specify which
415 instructions we are allowed to generate. */
416 static unsigned long insn_flags = 0;
418 /* The bits in this mask specify which instruction scheduling options should
419 be used. */
420 static unsigned long tune_flags = 0;
422 /* The following are used in the arm.md file as equivalents to bits
423 in the above two flag variables. */
425 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
426 int arm_arch3m = 0;
428 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
429 int arm_arch4 = 0;
431 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
432 int arm_arch4t = 0;
434 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
435 int arm_arch5 = 0;
437 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
438 int arm_arch5e = 0;
440 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
441 int arm_arch6 = 0;
443 /* Nonzero if this chip can benefit from load scheduling. */
444 int arm_ld_sched = 0;
446 /* Nonzero if this chip is a StrongARM. */
447 int arm_is_strong = 0;
449 /* Nonzero if this chip is a Cirrus variant. */
450 int arm_arch_cirrus = 0;
452 /* Nonzero if this chip supports Intel Wireless MMX technology. */
453 int arm_arch_iwmmxt = 0;
455 /* Nonzero if this chip is an XScale. */
456 int arm_arch_xscale = 0;
458 /* Nonzero if tuning for XScale */
459 int arm_tune_xscale = 0;
461 /* Nonzero if this chip is an ARM6 or an ARM7. */
462 int arm_is_6_or_7 = 0;
464 /* Nonzero if generating Thumb instructions. */
465 int thumb_code = 0;
467 /* Nonzero if we should define __THUMB_INTERWORK__ in the
468 preprocessor.
469 XXX This is a bit of a hack, it's intended to help work around
470 problems in GLD which doesn't understand that armv5t code is
471 interworking clean. */
472 int arm_cpp_interwork = 0;
474 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
475 must report the mode of the memory reference from PRINT_OPERAND to
476 PRINT_OPERAND_ADDRESS. */
477 enum machine_mode output_memory_reference_mode;
479 /* The register number to be used for the PIC offset register. */
480 const char * arm_pic_register_string = NULL;
481 int arm_pic_register = INVALID_REGNUM;
483 /* Set to 1 when a return insn is output, this means that the epilogue
484 is not needed. */
485 int return_used_this_function;
487 /* Set to 1 after arm_reorg has started. Reset to start at the start of
488 the next function. */
489 static int after_arm_reorg = 0;
491 /* The maximum number of insns to be used when loading a constant. */
492 static int arm_constant_limit = 3;
494 /* For an explanation of these variables, see final_prescan_insn below. */
495 int arm_ccfsm_state;
496 enum arm_cond_code arm_current_cc;
497 rtx arm_target_insn;
498 int arm_target_label;
500 /* The condition codes of the ARM, and the inverse function. */
501 static const char * const arm_condition_codes[] =
503 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
504 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
507 #define streq(string1, string2) (strcmp (string1, string2) == 0)
509 /* Initialization code. */
511 struct processors
513 const char *const name;
514 enum processor_type core;
515 const char *arch;
516 const unsigned long flags;
517 bool (* rtx_costs) (rtx, int, int, int *);
520 /* Not all of these give usefully different compilation alternatives,
521 but there is no simple way of generalizing them. */
522 static const struct processors all_cores[] =
524 /* ARM Cores */
525 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
526 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
527 #include "arm-cores.def"
528 #undef ARM_CORE
529 {NULL, arm_none, NULL, 0, NULL}
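/* Editorial note (not part of the original source): all_cores is filled in
   with an X-macro.  Each ARM_CORE line in arm-cores.def becomes one
   initializer of the table above; a hypothetical entry of the form

       ARM_CORE ("arm7tdmi", arm7tdmi, 4T, FL_CO_PROC, slowmul)

   would expand, via the #define above, to

       {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
        arm_slowmul_rtx_costs},

   i.e. the ARCH argument is stringized for the arch field and also pasted
   onto FL_FOR_ARCH to pull in that architecture's feature flags, while the
   COSTS argument selects one of the arm_*_rtx_costs routines declared at
   the top of this file.  */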
532 static const struct processors all_architectures[] =
534 /* ARM Architectures */
535 /* We don't specify rtx_costs here as it will be figured out
536 from the core. */
538 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
539 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
540 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
541 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
542 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
543 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
544 implementations that support it, so we will leave it out for now. */
545 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
546 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
547 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
548 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
549 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
550 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
551 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
552 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
553 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
554 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
555 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
556 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
557 {NULL, arm_none, NULL, 0 , NULL}
560 /* This is a magic structure. The 'string' field is magically filled in
561 with a pointer to the value specified by the user on the command line
562 assuming that the user has specified such a value. */
564 struct arm_cpu_select arm_select[] =
566 /* string name processors */
567 { NULL, "-mcpu=", all_cores },
568 { NULL, "-march=", all_architectures },
569 { NULL, "-mtune=", all_cores }
 573 /* The name of the preprocessor macro to define for this architecture.  */
575 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
577 struct fpu_desc
579 const char * name;
580 enum fputype fpu;
 584 /* Available values for -mfpu=.  */
586 static const struct fpu_desc all_fpus[] =
588 {"fpa", FPUTYPE_FPA},
589 {"fpe2", FPUTYPE_FPA_EMU2},
590 {"fpe3", FPUTYPE_FPA_EMU2},
591 {"maverick", FPUTYPE_MAVERICK},
592 {"vfp", FPUTYPE_VFP}
596 /* Floating point models used by the different hardware.
597 See fputype in arm.h. */
599 static const enum fputype fp_model_for_fpu[] =
601 /* No FP hardware. */
602 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
603 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
604 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
605 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
606 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
607 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
611 struct float_abi
613 const char * name;
614 enum float_abi_type abi_type;
618 /* Available values for -mfloat-abi=. */
620 static const struct float_abi all_float_abis[] =
622 {"soft", ARM_FLOAT_ABI_SOFT},
623 {"softfp", ARM_FLOAT_ABI_SOFTFP},
624 {"hard", ARM_FLOAT_ABI_HARD}
628 struct abi_name
630 const char *name;
631 enum arm_abi_type abi_type;
635 /* Available values for -mabi=. */
637 static const struct abi_name arm_all_abis[] =
639 {"apcs-gnu", ARM_ABI_APCS},
640 {"atpcs", ARM_ABI_ATPCS},
641 {"aapcs", ARM_ABI_AAPCS},
642 {"iwmmxt", ARM_ABI_IWMMXT}
645 /* Return the number of bits set in VALUE. */
646 static unsigned
647 bit_count (unsigned long value)
649 unsigned long count = 0;
651 while (value)
653 count++;
654 value &= value - 1; /* Clear the least-significant set bit. */
657 return count;
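/* Editorial sketch (not part of the original source): the loop above uses
   the "value &= value - 1" idiom, which clears the least-significant set
   bit on every iteration, so the body runs exactly once per set bit.
   For example 0x2c (binary 101100) goes 0x2c -> 0x28 -> 0x20 -> 0, giving
   a count of 3.  A hypothetical self-test of the expected results:  */
#if 0
static void
bit_count_selftest (void)
{
  if (bit_count (0x2cUL) != 3
      || bit_count (0UL) != 0
      || bit_count (0xffffffffUL) != 32)
    abort ();
}
#endif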
660 /* Set up library functions unique to ARM. */
662 static void
663 arm_init_libfuncs (void)
665 /* There are no special library functions unless we are using the
666 ARM BPABI. */
667 if (!TARGET_BPABI)
668 return;
670 /* The functions below are described in Section 4 of the "Run-Time
671 ABI for the ARM architecture", Version 1.0. */
673 /* Double-precision floating-point arithmetic. Table 2. */
674 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
675 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
676 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
677 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
678 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
680 /* Double-precision comparisons. Table 3. */
681 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
682 set_optab_libfunc (ne_optab, DFmode, NULL);
683 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
684 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
685 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
686 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
687 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
689 /* Single-precision floating-point arithmetic. Table 4. */
690 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
691 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
692 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
693 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
694 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
696 /* Single-precision comparisons. Table 5. */
697 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
698 set_optab_libfunc (ne_optab, SFmode, NULL);
699 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
700 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
701 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
702 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
703 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
705 /* Floating-point to integer conversions. Table 6. */
706 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
707 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
708 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
709 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
710 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
711 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
712 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
713 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
715 /* Conversions between floating types. Table 7. */
716 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
717 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
719 /* Integer to floating-point conversions. Table 8. */
720 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
721 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
722 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
723 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
724 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
725 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
726 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
727 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
729 /* Long long. Table 9. */
730 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
731 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
732 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
733 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
734 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
735 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
736 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
737 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
739 /* Integer (32/32->32) division. \S 4.3.1. */
740 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
741 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
743 /* The divmod functions are designed so that they can be used for
744 plain division, even though they return both the quotient and the
745 remainder. The quotient is returned in the usual location (i.e.,
746 r0 for SImode, {r0, r1} for DImode), just as would be expected
747 for an ordinary division routine. Because the AAPCS calling
748 conventions specify that all of { r0, r1, r2, r3 } are
 749 call-clobbered registers, there is no need to tell the compiler
750 explicitly that those registers are clobbered by these
751 routines. */
752 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
753 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
754 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
755 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
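/* Editorial sketch (not part of the original source): the net effect of
   mapping sdiv_optab/udiv_optab to the divmod entry points is that a
   plain C division on a BPABI target is lowered to a call such as
   __aeabi_idivmod, with the quotient coming back where an ordinary
   division routine would leave it (r0 for SImode, {r0, r1} for DImode)
   and the remainder simply ignored.  Roughly:  */
#if 0
int
quotient_sketch (int a, int b)
{
  /* Expected to compile to something like "bl __aeabi_idivmod", with
     the quotient read from r0 and the remainder left in r1 discarded.  */
  return a / b;
}
#endif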
758 /* Fix up any incompatible options that the user has specified.
759 This has now turned into a maze. */
760 void
761 arm_override_options (void)
763 unsigned i;
765 /* Set up the flags based on the cpu/architecture selected by the user. */
766 for (i = ARRAY_SIZE (arm_select); i--;)
768 struct arm_cpu_select * ptr = arm_select + i;
770 if (ptr->string != NULL && ptr->string[0] != '\0')
772 const struct processors * sel;
774 for (sel = ptr->processors; sel->name != NULL; sel++)
775 if (streq (ptr->string, sel->name))
777 /* Set the architecture define. */
778 if (i != 2)
779 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
781 /* Determine the processor core for which we should
782 tune code-generation. */
783 if (/* -mcpu= is a sensible default. */
784 i == 0
785 /* If -march= is used, and -mcpu= has not been used,
786 assume that we should tune for a representative
787 CPU from that architecture. */
788 || i == 1
789 /* -mtune= overrides -mcpu= and -march=. */
790 || i == 2)
791 arm_tune = (enum processor_type) (sel - ptr->processors);
793 if (i != 2)
795 /* If we have been given an architecture and a processor
796 make sure that they are compatible. We only generate
797 a warning though, and we prefer the CPU over the
798 architecture. */
799 if (insn_flags != 0 && (insn_flags ^ sel->flags))
800 warning ("switch -mcpu=%s conflicts with -march= switch",
801 ptr->string);
803 insn_flags = sel->flags;
806 break;
809 if (sel->name == NULL)
810 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
814 /* If the user did not specify a processor, choose one for them. */
815 if (insn_flags == 0)
817 const struct processors * sel;
818 unsigned int sought;
819 enum processor_type cpu;
821 cpu = TARGET_CPU_DEFAULT;
822 if (cpu == arm_none)
824 #ifdef SUBTARGET_CPU_DEFAULT
825 /* Use the subtarget default CPU if none was specified by
826 configure. */
827 cpu = SUBTARGET_CPU_DEFAULT;
828 #endif
829 /* Default to ARM6. */
830 if (cpu == arm_none)
831 cpu = arm6;
833 sel = &all_cores[cpu];
835 insn_flags = sel->flags;
837 /* Now check to see if the user has specified some command line
 838 switches that require certain abilities from the cpu.  */
839 sought = 0;
841 if (TARGET_INTERWORK || TARGET_THUMB)
843 sought |= (FL_THUMB | FL_MODE32);
845 /* There are no ARM processors that support both APCS-26 and
846 interworking. Therefore we force FL_MODE26 to be removed
847 from insn_flags here (if it was set), so that the search
848 below will always be able to find a compatible processor. */
849 insn_flags &= ~FL_MODE26;
852 if (sought != 0 && ((sought & insn_flags) != sought))
854 /* Try to locate a CPU type that supports all of the abilities
855 of the default CPU, plus the extra abilities requested by
856 the user. */
857 for (sel = all_cores; sel->name != NULL; sel++)
858 if ((sel->flags & sought) == (sought | insn_flags))
859 break;
861 if (sel->name == NULL)
863 unsigned current_bit_count = 0;
864 const struct processors * best_fit = NULL;
866 /* Ideally we would like to issue an error message here
867 saying that it was not possible to find a CPU compatible
868 with the default CPU, but which also supports the command
869 line options specified by the programmer, and so they
870 ought to use the -mcpu=<name> command line option to
871 override the default CPU type.
873 If we cannot find a cpu that has both the
874 characteristics of the default cpu and the given
875 command line options we scan the array again looking
876 for a best match. */
877 for (sel = all_cores; sel->name != NULL; sel++)
878 if ((sel->flags & sought) == sought)
880 unsigned count;
882 count = bit_count (sel->flags & insn_flags);
884 if (count >= current_bit_count)
886 best_fit = sel;
887 current_bit_count = count;
891 if (best_fit == NULL)
892 abort ();
893 else
894 sel = best_fit;
897 insn_flags = sel->flags;
899 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
900 if (arm_tune == arm_none)
901 arm_tune = (enum processor_type) (sel - all_cores);
904 /* The processor for which we should tune should now have been
905 chosen. */
906 if (arm_tune == arm_none)
907 abort ();
909 tune_flags = all_cores[(int)arm_tune].flags;
910 if (optimize_size)
911 targetm.rtx_costs = arm_size_rtx_costs;
912 else
913 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
915 /* Make sure that the processor choice does not conflict with any of the
916 other command line choices. */
917 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
919 warning ("target CPU does not support interworking" );
920 target_flags &= ~ARM_FLAG_INTERWORK;
923 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
925 warning ("target CPU does not support THUMB instructions");
926 target_flags &= ~ARM_FLAG_THUMB;
929 if (TARGET_APCS_FRAME && TARGET_THUMB)
931 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
932 target_flags &= ~ARM_FLAG_APCS_FRAME;
935 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
936 from here where no function is being compiled currently. */
937 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
938 && TARGET_ARM)
939 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
941 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
942 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
944 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
945 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
947 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
949 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
950 target_flags |= ARM_FLAG_APCS_FRAME;
953 if (TARGET_POKE_FUNCTION_NAME)
954 target_flags |= ARM_FLAG_APCS_FRAME;
956 if (TARGET_APCS_REENT && flag_pic)
957 error ("-fpic and -mapcs-reent are incompatible");
959 if (TARGET_APCS_REENT)
960 warning ("APCS reentrant code not supported. Ignored");
962 /* If this target is normally configured to use APCS frames, warn if they
963 are turned off and debugging is turned on. */
964 if (TARGET_ARM
965 && write_symbols != NO_DEBUG
966 && !TARGET_APCS_FRAME
967 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
968 warning ("-g with -mno-apcs-frame may not give sensible debugging");
970 /* If stack checking is disabled, we can use r10 as the PIC register,
971 which keeps r9 available. */
972 if (flag_pic)
973 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
975 if (TARGET_APCS_FLOAT)
976 warning ("passing floating point arguments in fp regs not yet supported");
978 /* Initialize boolean versions of the flags, for use in the arm.md file. */
979 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
980 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
981 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
982 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
983 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
984 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
985 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
986 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
988 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
989 arm_is_strong = (tune_flags & FL_STRONG) != 0;
990 thumb_code = (TARGET_ARM == 0);
991 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
992 && !(tune_flags & FL_ARCH4))) != 0;
993 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
994 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
996 /* V5 code we generate is completely interworking capable, so we turn off
997 TARGET_INTERWORK here to avoid many tests later on. */
999 /* XXX However, we must pass the right pre-processor defines to CPP
1000 or GLD can get confused. This is a hack. */
1001 if (TARGET_INTERWORK)
1002 arm_cpp_interwork = 1;
1004 if (arm_arch5)
1005 target_flags &= ~ARM_FLAG_INTERWORK;
1007 if (target_abi_name)
1009 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
1011 if (streq (arm_all_abis[i].name, target_abi_name))
1013 arm_abi = arm_all_abis[i].abi_type;
1014 break;
1017 if (i == ARRAY_SIZE (arm_all_abis))
1018 error ("invalid ABI option: -mabi=%s", target_abi_name);
1020 else
1021 arm_abi = ARM_DEFAULT_ABI;
1023 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
1024 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1026 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
1027 error ("iwmmxt abi requires an iwmmxt capable cpu");
1029 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
1030 if (target_fpu_name == NULL && target_fpe_name != NULL)
1032 if (streq (target_fpe_name, "2"))
1033 target_fpu_name = "fpe2";
1034 else if (streq (target_fpe_name, "3"))
1035 target_fpu_name = "fpe3";
1036 else
1037 error ("invalid floating point emulation option: -mfpe=%s",
1038 target_fpe_name);
1040 if (target_fpu_name != NULL)
1042 /* The user specified a FPU. */
1043 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1045 if (streq (all_fpus[i].name, target_fpu_name))
1047 arm_fpu_arch = all_fpus[i].fpu;
1048 arm_fpu_tune = arm_fpu_arch;
1049 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1050 break;
1053 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1054 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1056 else
1058 #ifdef FPUTYPE_DEFAULT
1059 /* Use the default if it is specified for this platform. */
1060 arm_fpu_arch = FPUTYPE_DEFAULT;
1061 arm_fpu_tune = FPUTYPE_DEFAULT;
1062 #else
1063 /* Pick one based on CPU type. */
1064 /* ??? Some targets assume FPA is the default.
1065 if ((insn_flags & FL_VFP) != 0)
1066 arm_fpu_arch = FPUTYPE_VFP;
1067 else
1069 if (arm_arch_cirrus)
1070 arm_fpu_arch = FPUTYPE_MAVERICK;
1071 else
1072 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1073 #endif
1074 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1075 arm_fpu_tune = FPUTYPE_FPA;
1076 else
1077 arm_fpu_tune = arm_fpu_arch;
1078 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1079 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1080 abort ();
1083 if (target_float_abi_name != NULL)
1085 /* The user specified a FP ABI. */
1086 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1088 if (streq (all_float_abis[i].name, target_float_abi_name))
1090 arm_float_abi = all_float_abis[i].abi_type;
1091 break;
1094 if (i == ARRAY_SIZE (all_float_abis))
1095 error ("invalid floating point abi: -mfloat-abi=%s",
1096 target_float_abi_name);
1098 else if (target_float_switch)
1100 /* This is a bit of a hack to avoid needing target flags for these. */
1101 if (target_float_switch[0] == 'h')
1102 arm_float_abi = ARM_FLOAT_ABI_HARD;
1103 else
1104 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1106 else
1107 arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
1109 if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
1110 sorry ("-mfloat-abi=hard and VFP");
1112 /* If soft-float is specified then don't use FPU. */
1113 if (TARGET_SOFT_FLOAT)
1114 arm_fpu_arch = FPUTYPE_NONE;
1116 /* For arm2/3 there is no need to do any scheduling if there is only
1117 a floating point emulator, or we are doing software floating-point. */
1118 if ((TARGET_SOFT_FLOAT
1119 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1120 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1121 && (tune_flags & FL_MODE32) == 0)
1122 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1124 /* Override the default structure alignment for AAPCS ABI. */
1125 if (arm_abi == ARM_ABI_AAPCS)
1126 arm_structure_size_boundary = 8;
1128 if (structure_size_string != NULL)
1130 int size = strtol (structure_size_string, NULL, 0);
1132 if (size == 8 || size == 32
1133 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1134 arm_structure_size_boundary = size;
1135 else
1136 warning ("structure size boundary can only be set to %s",
1137 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
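/* Editorial note (not part of the original source): the boundary chosen
   above is the number of bits that every structure or union size is
   rounded up to.  Assuming the usual ARM default of 32, a hypothetical

       struct tiny { char c; };

   has sizeof (struct tiny) == 4, but only 1 when built with
   -mstructure-size-boundary=8 or under the AAPCS setting forced above;
   mixing different boundaries across a program is therefore an ABI
   hazard.  */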
1140 if (arm_pic_register_string != NULL)
1142 int pic_register = decode_reg_name (arm_pic_register_string);
1144 if (!flag_pic)
1145 warning ("-mpic-register= is useless without -fpic");
1147 /* Prevent the user from choosing an obviously stupid PIC register. */
1148 else if (pic_register < 0 || call_used_regs[pic_register]
1149 || pic_register == HARD_FRAME_POINTER_REGNUM
1150 || pic_register == STACK_POINTER_REGNUM
1151 || pic_register >= PC_REGNUM)
1152 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1153 else
1154 arm_pic_register = pic_register;
1157 if (TARGET_THUMB && flag_schedule_insns)
1159 /* Don't warn since it's on by default in -O2. */
1160 flag_schedule_insns = 0;
1163 if (optimize_size)
1165 /* There's some dispute as to whether this should be 1 or 2. However,
1166 experiments seem to show that in pathological cases a setting of
1167 1 degrades less severely than a setting of 2. This could change if
1168 other parts of the compiler change their behavior. */
1169 arm_constant_limit = 1;
1171 /* If optimizing for size, bump the number of instructions that we
1172 are prepared to conditionally execute (even on a StrongARM). */
1173 max_insns_skipped = 6;
1175 else
1177 /* For processors with load scheduling, it never costs more than
1178 2 cycles to load a constant, and the load scheduler may well
1179 reduce that to 1. */
1180 if (arm_ld_sched)
1181 arm_constant_limit = 1;
1183 /* On XScale the longer latency of a load makes it more difficult
1184 to achieve a good schedule, so it's faster to synthesize
1185 constants that can be done in two insns. */
1186 if (arm_tune_xscale)
1187 arm_constant_limit = 2;
1189 /* StrongARM has early execution of branches, so a sequence
1190 that is worth skipping is shorter. */
1191 if (arm_is_strong)
1192 max_insns_skipped = 3;
1195 /* Register global variables with the garbage collector. */
1196 arm_add_gc_roots ();
1199 static void
1200 arm_add_gc_roots (void)
1202 gcc_obstack_init(&minipool_obstack);
1203 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1206 /* A table of known ARM exception types.
1207 For use with the interrupt function attribute. */
1209 typedef struct
1211 const char *const arg;
1212 const unsigned long return_value;
1214 isr_attribute_arg;
1216 static const isr_attribute_arg isr_attribute_args [] =
1218 { "IRQ", ARM_FT_ISR },
1219 { "irq", ARM_FT_ISR },
1220 { "FIQ", ARM_FT_FIQ },
1221 { "fiq", ARM_FT_FIQ },
1222 { "ABORT", ARM_FT_ISR },
1223 { "abort", ARM_FT_ISR },
1224 { "ABORT", ARM_FT_ISR },
1225 { "abort", ARM_FT_ISR },
1226 { "UNDEF", ARM_FT_EXCEPTION },
1227 { "undef", ARM_FT_EXCEPTION },
1228 { "SWI", ARM_FT_EXCEPTION },
1229 { "swi", ARM_FT_EXCEPTION },
1230 { NULL, ARM_FT_NORMAL }
1233 /* Returns the (interrupt) function type of the current
1234 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1236 static unsigned long
1237 arm_isr_value (tree argument)
1239 const isr_attribute_arg * ptr;
1240 const char * arg;
1242 /* No argument - default to IRQ. */
1243 if (argument == NULL_TREE)
1244 return ARM_FT_ISR;
1246 /* Get the value of the argument. */
1247 if (TREE_VALUE (argument) == NULL_TREE
1248 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1249 return ARM_FT_UNKNOWN;
1251 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1253 /* Check it against the list of known arguments. */
1254 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1255 if (streq (arg, ptr->arg))
1256 return ptr->return_value;
1258 /* An unrecognized interrupt type. */
1259 return ARM_FT_UNKNOWN;
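/* Editorial sketch (not part of the original source): arm_isr_value is
   what interprets the string argument of the "isr" / "interrupt"
   function attribute, so a hypothetical handler such as

       void fast_handler (void) __attribute__ ((interrupt ("FIQ")));

   is classified as ARM_FT_FIQ, while omitting the argument altogether
   defaults to ARM_FT_ISR (an ordinary IRQ handler).  */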
1262 /* Computes the type of the current function. */
1264 static unsigned long
1265 arm_compute_func_type (void)
1267 unsigned long type = ARM_FT_UNKNOWN;
1268 tree a;
1269 tree attr;
1271 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1272 abort ();
1274 /* Decide if the current function is volatile. Such functions
1275 never return, and many memory cycles can be saved by not storing
1276 register values that will never be needed again. This optimization
1277 was added to speed up context switching in a kernel application. */
1278 if (optimize > 0
1279 && TREE_NOTHROW (current_function_decl)
1280 && TREE_THIS_VOLATILE (current_function_decl))
1281 type |= ARM_FT_VOLATILE;
1283 if (cfun->static_chain_decl != NULL)
1284 type |= ARM_FT_NESTED;
1286 attr = DECL_ATTRIBUTES (current_function_decl);
1288 a = lookup_attribute ("naked", attr);
1289 if (a != NULL_TREE)
1290 type |= ARM_FT_NAKED;
1292 a = lookup_attribute ("isr", attr);
1293 if (a == NULL_TREE)
1294 a = lookup_attribute ("interrupt", attr);
1296 if (a == NULL_TREE)
1297 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1298 else
1299 type |= arm_isr_value (TREE_VALUE (a));
1301 return type;
1304 /* Returns the type of the current function. */
1306 unsigned long
1307 arm_current_func_type (void)
1309 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1310 cfun->machine->func_type = arm_compute_func_type ();
1312 return cfun->machine->func_type;
1315 /* Return 1 if it is possible to return using a single instruction.
1316 If SIBLING is non-null, this is a test for a return before a sibling
1317 call. SIBLING is the call insn, so we can examine its register usage. */
1320 use_return_insn (int iscond, rtx sibling)
1322 int regno;
1323 unsigned int func_type;
1324 unsigned long saved_int_regs;
1325 unsigned HOST_WIDE_INT stack_adjust;
1326 arm_stack_offsets *offsets;
1328 /* Never use a return instruction before reload has run. */
1329 if (!reload_completed)
1330 return 0;
1332 func_type = arm_current_func_type ();
1334 /* Naked functions and volatile functions need special
1335 consideration. */
1336 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1337 return 0;
1339 /* So do interrupt functions that use the frame pointer. */
1340 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1341 return 0;
1343 offsets = arm_get_frame_offsets ();
1344 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1346 /* As do variadic functions. */
1347 if (current_function_pretend_args_size
1348 || cfun->machine->uses_anonymous_args
1349 /* Or if the function calls __builtin_eh_return () */
1350 || current_function_calls_eh_return
1351 /* Or if the function calls alloca */
1352 || current_function_calls_alloca
1353 /* Or if there is a stack adjustment. However, if the stack pointer
1354 is saved on the stack, we can use a pre-incrementing stack load. */
1355 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1356 return 0;
1358 saved_int_regs = arm_compute_save_reg_mask ();
1360 /* Unfortunately, the insn
1362 ldmib sp, {..., sp, ...}
1364 triggers a bug on most SA-110 based devices, such that the stack
1365 pointer won't be correctly restored if the instruction takes a
1366 page fault. We work around this problem by popping r3 along with
1367 the other registers, since that is never slower than executing
1368 another instruction.
1370 We test for !arm_arch5 here, because code for any architecture
1371 less than this could potentially be run on one of the buggy
1372 chips. */
1373 if (stack_adjust == 4 && !arm_arch5)
1375 /* Validate that r3 is a call-clobbered register (always true in
1376 the default abi) ... */
1377 if (!call_used_regs[3])
1378 return 0;
1380 /* ... that it isn't being used for a return value (always true
1381 until we implement return-in-regs), or for a tail-call
1382 argument ... */
1383 if (sibling)
1385 if (GET_CODE (sibling) != CALL_INSN)
1386 abort ();
1388 if (find_regno_fusage (sibling, USE, 3))
1389 return 0;
1392 /* ... and that there are no call-saved registers in r0-r2
1393 (always true in the default ABI). */
1394 if (saved_int_regs & 0x7)
1395 return 0;
1398 /* Can't be done if interworking with Thumb, and any registers have been
1399 stacked. */
1400 if (TARGET_INTERWORK && saved_int_regs != 0)
1401 return 0;
1403 /* On StrongARM, conditional returns are expensive if they aren't
1404 taken and multiple registers have been stacked. */
1405 if (iscond && arm_is_strong)
1407 /* Conditional return when just the LR is stored is a simple
1408 conditional-load instruction, that's not expensive. */
1409 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1410 return 0;
1412 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1413 return 0;
1416 /* If there are saved registers but the LR isn't saved, then we need
1417 two instructions for the return. */
1418 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1419 return 0;
1421 /* Can't be done if any of the FPA regs are pushed,
1422 since this also requires an insn. */
1423 if (TARGET_HARD_FLOAT && TARGET_FPA)
1424 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1425 if (regs_ever_live[regno] && !call_used_regs[regno])
1426 return 0;
1428 /* Likewise VFP regs. */
1429 if (TARGET_HARD_FLOAT && TARGET_VFP)
1430 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1431 if (regs_ever_live[regno] && !call_used_regs[regno])
1432 return 0;
1434 if (TARGET_REALLY_IWMMXT)
1435 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1436 if (regs_ever_live[regno] && ! call_used_regs [regno])
1437 return 0;
1439 return 1;
1442 /* Return TRUE if int I is a valid immediate ARM constant. */
1445 const_ok_for_arm (HOST_WIDE_INT i)
1447 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1449 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1450 be all zero, or all one. */
1451 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1452 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1453 != ((~(unsigned HOST_WIDE_INT) 0)
1454 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1455 return FALSE;
1457 /* Fast return for 0 and powers of 2 */
1458 if ((i & (i - 1)) == 0)
1459 return TRUE;
1463 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1464 return TRUE;
1465 mask =
1466 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1467 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1469 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1471 return FALSE;
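/* Editorial sketch (not part of the original source): the loop above
   implements the standard ARM data-processing immediate rule -- a value
   is encodable iff it is an 8-bit constant rotated right by an even
   amount within 32 bits.  A few hypothetical spot checks:  */
#if 0
static void
const_ok_for_arm_sketch (void)
{
  if (!const_ok_for_arm (0xff)		/* 8 bits, no rotation.  */
      || !const_ok_for_arm (0xff000000)	/* 0xff rotated right by 8.  */
      || !const_ok_for_arm (0x3fc)	/* 0xff << 2, an even rotation.  */
      || const_ok_for_arm (0x101)	/* Set bits span more than 8 bits.  */
      || const_ok_for_arm (0xffff))	/* Likewise.  */
    abort ();
}
#endif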
1474 /* Return true if I is a valid constant for the operation CODE. */
1475 static int
1476 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1478 if (const_ok_for_arm (i))
1479 return 1;
1481 switch (code)
1483 case PLUS:
1484 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1486 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1487 case XOR:
1488 case IOR:
1489 return 0;
1491 case AND:
1492 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1494 default:
1495 abort ();
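/* Editorial note (not part of the original source): the special cases
   above correspond to ARM instructions that take the complemented
   constant.  Adding -2, for instance, is not a valid immediate, but 2
   is, so the PLUS can be emitted as "sub rD, rN, #2"; similarly an AND
   with 0xffffff00 can become "bic rD, rN, #0xff", which is why AND
   accepts any constant whose bitwise inverse is encodable.  */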
1499 /* Emit a sequence of insns to handle a large constant.
1500 CODE is the code of the operation required, it can be any of SET, PLUS,
1501 IOR, AND, XOR, MINUS;
1502 MODE is the mode in which the operation is being performed;
1503 VAL is the integer to operate on;
1504 SOURCE is the other operand (a register, or a null-pointer for SET);
1505 SUBTARGETS means it is safe to create scratch registers if that will
1506 either produce a simpler sequence, or we will want to cse the values.
1507 Return value is the number of insns emitted. */
1510 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1511 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1513 rtx cond;
1515 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1516 cond = COND_EXEC_TEST (PATTERN (insn));
1517 else
1518 cond = NULL_RTX;
1520 if (subtargets || code == SET
1521 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1522 && REGNO (target) != REGNO (source)))
1524 /* After arm_reorg has been called, we can't fix up expensive
1525 constants by pushing them into memory so we must synthesize
1526 them in-line, regardless of the cost. This is only likely to
1527 be more costly on chips that have load delay slots and we are
1528 compiling without running the scheduler (so no splitting
1529 occurred before the final instruction emission).
1531 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1533 if (!after_arm_reorg
1534 && !cond
1535 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1536 1, 0)
1537 > arm_constant_limit + (code != SET)))
1539 if (code == SET)
1541 /* Currently SET is the only monadic value for CODE, all
1542 the rest are diadic. */
1543 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1544 return 1;
1546 else
1548 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1550 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1551 /* For MINUS, the value is subtracted from, since we never
1552 have subtraction of a constant. */
1553 if (code == MINUS)
1554 emit_insn (gen_rtx_SET (VOIDmode, target,
1555 gen_rtx_MINUS (mode, temp, source)));
1556 else
1557 emit_insn (gen_rtx_SET (VOIDmode, target,
1558 gen_rtx_fmt_ee (code, mode, source, temp)));
1559 return 2;
1564 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1568 static int
1569 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1571 HOST_WIDE_INT temp1;
1572 int num_insns = 0;
1575 int end;
1577 if (i <= 0)
1578 i += 32;
1579 if (remainder & (3 << (i - 2)))
1581 end = i - 8;
1582 if (end < 0)
1583 end += 32;
1584 temp1 = remainder & ((0x0ff << end)
1585 | ((i < end) ? (0xff >> (32 - end)) : 0));
1586 remainder &= ~temp1;
1587 num_insns++;
1588 i -= 6;
1590 i -= 2;
1591 } while (remainder);
1592 return num_insns;
1595 /* Emit an instruction with the indicated PATTERN. If COND is
1596 non-NULL, conditionalize the execution of the instruction on COND
1597 being true. */
1599 static void
1600 emit_constant_insn (rtx cond, rtx pattern)
1602 if (cond)
1603 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1604 emit_insn (pattern);
1607 /* As above, but extra parameter GENERATE which, if clear, suppresses
1608 RTL generation. */
1610 static int
1611 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1612 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1613 int generate)
1615 int can_invert = 0;
1616 int can_negate = 0;
1617 int can_negate_initial = 0;
1618 int can_shift = 0;
1619 int i;
1620 int num_bits_set = 0;
1621 int set_sign_bit_copies = 0;
1622 int clear_sign_bit_copies = 0;
1623 int clear_zero_bit_copies = 0;
1624 int set_zero_bit_copies = 0;
1625 int insns = 0;
1626 unsigned HOST_WIDE_INT temp1, temp2;
1627 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1629 /* Find out which operations are safe for a given CODE. Also do a quick
1630 check for degenerate cases; these can occur when DImode operations
1631 are split. */
1632 switch (code)
1634 case SET:
1635 can_invert = 1;
1636 can_shift = 1;
1637 can_negate = 1;
1638 break;
1640 case PLUS:
1641 can_negate = 1;
1642 can_negate_initial = 1;
1643 break;
1645 case IOR:
1646 if (remainder == 0xffffffff)
1648 if (generate)
1649 emit_constant_insn (cond,
1650 gen_rtx_SET (VOIDmode, target,
1651 GEN_INT (ARM_SIGN_EXTEND (val))));
1652 return 1;
1654 if (remainder == 0)
1656 if (reload_completed && rtx_equal_p (target, source))
1657 return 0;
1658 if (generate)
1659 emit_constant_insn (cond,
1660 gen_rtx_SET (VOIDmode, target, source));
1661 return 1;
1663 break;
1665 case AND:
1666 if (remainder == 0)
1668 if (generate)
1669 emit_constant_insn (cond,
1670 gen_rtx_SET (VOIDmode, target, const0_rtx));
1671 return 1;
1673 if (remainder == 0xffffffff)
1675 if (reload_completed && rtx_equal_p (target, source))
1676 return 0;
1677 if (generate)
1678 emit_constant_insn (cond,
1679 gen_rtx_SET (VOIDmode, target, source));
1680 return 1;
1682 can_invert = 1;
1683 break;
1685 case XOR:
1686 if (remainder == 0)
1688 if (reload_completed && rtx_equal_p (target, source))
1689 return 0;
1690 if (generate)
1691 emit_constant_insn (cond,
1692 gen_rtx_SET (VOIDmode, target, source));
1693 return 1;
1695 if (remainder == 0xffffffff)
1697 if (generate)
1698 emit_constant_insn (cond,
1699 gen_rtx_SET (VOIDmode, target,
1700 gen_rtx_NOT (mode, source)));
1701 return 1;
1704 /* We don't know how to handle this yet below. */
1705 abort ();
1707 case MINUS:
1708 /* We treat MINUS as (val - source), since (source - val) is always
1709 passed as (source + (-val)). */
1710 if (remainder == 0)
1712 if (generate)
1713 emit_constant_insn (cond,
1714 gen_rtx_SET (VOIDmode, target,
1715 gen_rtx_NEG (mode, source)));
1716 return 1;
1718 if (const_ok_for_arm (val))
1720 if (generate)
1721 emit_constant_insn (cond,
1722 gen_rtx_SET (VOIDmode, target,
1723 gen_rtx_MINUS (mode, GEN_INT (val),
1724 source)));
1725 return 1;
1727 can_negate = 1;
1729 break;
1731 default:
1732 abort ();
1735   /* If we can do it in one insn, get out quickly.  */
1736 if (const_ok_for_arm (val)
1737 || (can_negate_initial && const_ok_for_arm (-val))
1738 || (can_invert && const_ok_for_arm (~val)))
1740 if (generate)
1741 emit_constant_insn (cond,
1742 gen_rtx_SET (VOIDmode, target,
1743 (source
1744 ? gen_rtx_fmt_ee (code, mode, source,
1745 GEN_INT (val))
1746 : GEN_INT (val))));
1747 return 1;
1750 /* Calculate a few attributes that may be useful for specific
1751 optimizations. */
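  /* For example, with remainder == 0xffff0000 the loops below compute
     clear_sign_bit_copies == 0, set_sign_bit_copies == 16,
     clear_zero_bit_copies == 16 and set_zero_bit_copies == 0.  */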
1752 for (i = 31; i >= 0; i--)
1754 if ((remainder & (1 << i)) == 0)
1755 clear_sign_bit_copies++;
1756 else
1757 break;
1760 for (i = 31; i >= 0; i--)
1762 if ((remainder & (1 << i)) != 0)
1763 set_sign_bit_copies++;
1764 else
1765 break;
1768 for (i = 0; i <= 31; i++)
1770 if ((remainder & (1 << i)) == 0)
1771 clear_zero_bit_copies++;
1772 else
1773 break;
1776 for (i = 0; i <= 31; i++)
1778 if ((remainder & (1 << i)) != 0)
1779 set_zero_bit_copies++;
1780 else
1781 break;
1784 switch (code)
1786 case SET:
1787 /* See if we can do this by sign_extending a constant that is known
1788          to be negative.  This is a good way of doing it, since the shift
1789 may well merge into a subsequent insn. */
1790 if (set_sign_bit_copies > 1)
1792 if (const_ok_for_arm
1793 (temp1 = ARM_SIGN_EXTEND (remainder
1794 << (set_sign_bit_copies - 1))))
1796 if (generate)
1798 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1799 emit_constant_insn (cond,
1800 gen_rtx_SET (VOIDmode, new_src,
1801 GEN_INT (temp1)));
1802 emit_constant_insn (cond,
1803 gen_ashrsi3 (target, new_src,
1804 GEN_INT (set_sign_bit_copies - 1)));
1806 return 2;
1808       /* For an inverted constant, we will need to set the low bits;
1809 these will be shifted out of harm's way. */
1810 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1811 if (const_ok_for_arm (~temp1))
1813 if (generate)
1815 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1816 emit_constant_insn (cond,
1817 gen_rtx_SET (VOIDmode, new_src,
1818 GEN_INT (temp1)));
1819 emit_constant_insn (cond,
1820 gen_ashrsi3 (target, new_src,
1821 GEN_INT (set_sign_bit_copies - 1)));
1823 return 2;
1827 /* See if we can generate this by setting the bottom (or the top)
1828 16 bits, and then shifting these into the other half of the
1829          word.  We only look for the simplest cases; to do more would cost
1830 too much. Be careful, however, not to generate this when the
1831 alternative would take fewer insns. */
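      /* As an illustrative sketch of this path: for a SET of 0x01010101,
	 the low half 0x0101 is not a valid ARM immediate, so it is first
	 built in a temporary (two insns) and then ORRed with itself
	 shifted left by 16, giving the full constant in three insns.  */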
1832 if (val & 0xffff0000)
1834 temp1 = remainder & 0xffff0000;
1835 temp2 = remainder & 0x0000ffff;
1837 /* Overlaps outside this range are best done using other methods. */
1838 for (i = 9; i < 24; i++)
1840 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1841 && !const_ok_for_arm (temp2))
1843 rtx new_src = (subtargets
1844 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1845 : target);
1846 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1847 source, subtargets, generate);
1848 source = new_src;
1849 if (generate)
1850 emit_constant_insn
1851 (cond,
1852 gen_rtx_SET
1853 (VOIDmode, target,
1854 gen_rtx_IOR (mode,
1855 gen_rtx_ASHIFT (mode, source,
1856 GEN_INT (i)),
1857 source)));
1858 return insns + 1;
1862 /* Don't duplicate cases already considered. */
1863 for (i = 17; i < 24; i++)
1865 if (((temp1 | (temp1 >> i)) == remainder)
1866 && !const_ok_for_arm (temp1))
1868 rtx new_src = (subtargets
1869 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1870 : target);
1871 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1872 source, subtargets, generate);
1873 source = new_src;
1874 if (generate)
1875 emit_constant_insn
1876 (cond,
1877 gen_rtx_SET (VOIDmode, target,
1878 gen_rtx_IOR
1879 (mode,
1880 gen_rtx_LSHIFTRT (mode, source,
1881 GEN_INT (i)),
1882 source)));
1883 return insns + 1;
1887 break;
1889 case IOR:
1890 case XOR:
1891 /* If we have IOR or XOR, and the constant can be loaded in a
1892 single instruction, and we can find a temporary to put it in,
1893 then this can be done in two instructions instead of 3-4. */
1894 if (subtargets
1895           /* TARGET can't be NULL if SUBTARGETS is 0.  */
1896 || (reload_completed && !reg_mentioned_p (target, source)))
1898 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1900 if (generate)
1902 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1904 emit_constant_insn (cond,
1905 gen_rtx_SET (VOIDmode, sub,
1906 GEN_INT (val)));
1907 emit_constant_insn (cond,
1908 gen_rtx_SET (VOIDmode, target,
1909 gen_rtx_fmt_ee (code, mode,
1910 source, sub)));
1912 return 2;
1916 if (code == XOR)
1917 break;
1919 if (set_sign_bit_copies > 8
1920 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1922 if (generate)
1924 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1925 rtx shift = GEN_INT (set_sign_bit_copies);
1927 emit_constant_insn
1928 (cond,
1929 gen_rtx_SET (VOIDmode, sub,
1930 gen_rtx_NOT (mode,
1931 gen_rtx_ASHIFT (mode,
1932 source,
1933 shift))));
1934 emit_constant_insn
1935 (cond,
1936 gen_rtx_SET (VOIDmode, target,
1937 gen_rtx_NOT (mode,
1938 gen_rtx_LSHIFTRT (mode, sub,
1939 shift))));
1941 return 2;
1944 if (set_zero_bit_copies > 8
1945 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1947 if (generate)
1949 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1950 rtx shift = GEN_INT (set_zero_bit_copies);
1952 emit_constant_insn
1953 (cond,
1954 gen_rtx_SET (VOIDmode, sub,
1955 gen_rtx_NOT (mode,
1956 gen_rtx_LSHIFTRT (mode,
1957 source,
1958 shift))));
1959 emit_constant_insn
1960 (cond,
1961 gen_rtx_SET (VOIDmode, target,
1962 gen_rtx_NOT (mode,
1963 gen_rtx_ASHIFT (mode, sub,
1964 shift))));
1966 return 2;
1969 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1971 if (generate)
1973 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1974 emit_constant_insn (cond,
1975 gen_rtx_SET (VOIDmode, sub,
1976 gen_rtx_NOT (mode, source)));
1977 source = sub;
1978 if (subtargets)
1979 sub = gen_reg_rtx (mode);
1980 emit_constant_insn (cond,
1981 gen_rtx_SET (VOIDmode, sub,
1982 gen_rtx_AND (mode, source,
1983 GEN_INT (temp1))));
1984 emit_constant_insn (cond,
1985 gen_rtx_SET (VOIDmode, target,
1986 gen_rtx_NOT (mode, sub)));
1988 return 3;
1990 break;
1992 case AND:
1993       /* See if two shifts will do 2 or more insns' worth of work.  */
1994 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1996 HOST_WIDE_INT shift_mask = ((0xffffffff
1997 << (32 - clear_sign_bit_copies))
1998 & 0xffffffff);
2000 if ((remainder | shift_mask) != 0xffffffff)
2002 if (generate)
2004 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2005 insns = arm_gen_constant (AND, mode, cond,
2006 remainder | shift_mask,
2007 new_src, source, subtargets, 1);
2008 source = new_src;
2010 else
2012 rtx targ = subtargets ? NULL_RTX : target;
2013 insns = arm_gen_constant (AND, mode, cond,
2014 remainder | shift_mask,
2015 targ, source, subtargets, 0);
2019 if (generate)
2021 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2022 rtx shift = GEN_INT (clear_sign_bit_copies);
2024 emit_insn (gen_ashlsi3 (new_src, source, shift));
2025 emit_insn (gen_lshrsi3 (target, new_src, shift));
2028 return insns + 2;
2031 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2033 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2035 if ((remainder | shift_mask) != 0xffffffff)
2037 if (generate)
2039 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2041 insns = arm_gen_constant (AND, mode, cond,
2042 remainder | shift_mask,
2043 new_src, source, subtargets, 1);
2044 source = new_src;
2046 else
2048 rtx targ = subtargets ? NULL_RTX : target;
2050 insns = arm_gen_constant (AND, mode, cond,
2051 remainder | shift_mask,
2052 targ, source, subtargets, 0);
2056 if (generate)
2058 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2059 rtx shift = GEN_INT (clear_zero_bit_copies);
2061 emit_insn (gen_lshrsi3 (new_src, source, shift));
2062 emit_insn (gen_ashlsi3 (target, new_src, shift));
2065 return insns + 2;
2068 break;
2070 default:
2071 break;
2074 for (i = 0; i < 32; i++)
2075 if (remainder & (1 << i))
2076 num_bits_set++;
2078 if (code == AND || (can_invert && num_bits_set > 16))
2079 remainder = (~remainder) & 0xffffffff;
2080 else if (code == PLUS && num_bits_set > 16)
2081 remainder = (-remainder) & 0xffffffff;
2082 else
2084 can_invert = 0;
2085 can_negate = 0;
2088 /* Now try and find a way of doing the job in either two or three
2089 instructions.
2090 We start by looking for the largest block of zeros that are aligned on
2091      a 2-bit boundary; we then fill up the temps, wrapping around to the
2092 top of the word when we drop off the bottom.
2093 In the worst case this code should produce no more than four insns. */
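  /* For instance (illustrative only), a SET of 0x00ff00ff is emitted by
     the loop below as two insns: a MOV of the 8-bit chunk 0x00ff0000
     followed by an ADD of the remaining chunk 0x000000ff, both of which
     are valid ARM immediates.  */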
2095 int best_start = 0;
2096 int best_consecutive_zeros = 0;
2098 for (i = 0; i < 32; i += 2)
2100 int consecutive_zeros = 0;
2102 if (!(remainder & (3 << i)))
2104 while ((i < 32) && !(remainder & (3 << i)))
2106 consecutive_zeros += 2;
2107 i += 2;
2109 if (consecutive_zeros > best_consecutive_zeros)
2111 best_consecutive_zeros = consecutive_zeros;
2112 best_start = i - consecutive_zeros;
2114 i -= 2;
2118 /* So long as it won't require any more insns to do so, it's
2119 desirable to emit a small constant (in bits 0...9) in the last
2120 insn. This way there is more chance that it can be combined with
2121 a later addressing insn to form a pre-indexed load or store
2122 operation. Consider:
2124 *((volatile int *)0xe0000100) = 1;
2125 *((volatile int *)0xe0000110) = 2;
2127 We want this to wind up as:
2129 mov rA, #0xe0000000
2130 mov rB, #1
2131 str rB, [rA, #0x100]
2132 mov rB, #2
2133 str rB, [rA, #0x110]
2135 rather than having to synthesize both large constants from scratch.
2137 Therefore, we calculate how many insns would be required to emit
2138 the constant starting from `best_start', and also starting from
2139 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2140 yield a shorter sequence, we may as well use zero. */
2141 if (best_start != 0
2142 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2143 && (count_insns_for_constant (remainder, 0) <=
2144 count_insns_for_constant (remainder, best_start)))
2145 best_start = 0;
2147 /* Now start emitting the insns. */
2148 i = best_start;
2151 int end;
2153 if (i <= 0)
2154 i += 32;
2155 if (remainder & (3 << (i - 2)))
2157 end = i - 8;
2158 if (end < 0)
2159 end += 32;
2160 temp1 = remainder & ((0x0ff << end)
2161 | ((i < end) ? (0xff >> (32 - end)) : 0));
2162 remainder &= ~temp1;
2164 if (generate)
2166 rtx new_src, temp1_rtx;
2168 if (code == SET || code == MINUS)
2170 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2171 if (can_invert && code != MINUS)
2172 temp1 = ~temp1;
2174 else
2176 if (remainder && subtargets)
2177 new_src = gen_reg_rtx (mode);
2178 else
2179 new_src = target;
2180 if (can_invert)
2181 temp1 = ~temp1;
2182 else if (can_negate)
2183 temp1 = -temp1;
2186 temp1 = trunc_int_for_mode (temp1, mode);
2187 temp1_rtx = GEN_INT (temp1);
2189 if (code == SET)
2191 else if (code == MINUS)
2192 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2193 else
2194 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2196 emit_constant_insn (cond,
2197 gen_rtx_SET (VOIDmode, new_src,
2198 temp1_rtx));
2199 source = new_src;
2202 if (code == SET)
2204 can_invert = 0;
2205 code = PLUS;
2207 else if (code == MINUS)
2208 code = PLUS;
2210 insns++;
2211 i -= 6;
2213 i -= 2;
2215 while (remainder);
2218 return insns;
2221 /* Canonicalize a comparison so that we are more likely to recognize it.
2222 This can be done for a few constant compares, where we can make the
2223 immediate value easier to load. */
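/* For example, (x > 0xfff) cannot use 0xfff directly, since 0xfff is not a
   valid ARM immediate, but 0x1000 is; GT is therefore rewritten below as
   (x >= 0x1000).  The other orderings are adjusted in the same spirit.  */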
2225 enum rtx_code
2226 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2228 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2230 switch (code)
2232 case EQ:
2233 case NE:
2234 return code;
2236 case GT:
2237 case LE:
2238 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2239 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2241 *op1 = GEN_INT (i + 1);
2242 return code == GT ? GE : LT;
2244 break;
2246 case GE:
2247 case LT:
2248 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2249 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2251 *op1 = GEN_INT (i - 1);
2252 return code == GE ? GT : LE;
2254 break;
2256 case GTU:
2257 case LEU:
2258 if (i != ~((unsigned HOST_WIDE_INT) 0)
2259 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2261 *op1 = GEN_INT (i + 1);
2262 return code == GTU ? GEU : LTU;
2264 break;
2266 case GEU:
2267 case LTU:
2268 if (i != 0
2269 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2271 *op1 = GEN_INT (i - 1);
2272 return code == GEU ? GTU : LEU;
2274 break;
2276 default:
2277 abort ();
2280 return code;
2284 /* Define how to find the value returned by a function. */
2287 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2289 enum machine_mode mode;
2290 int unsignedp ATTRIBUTE_UNUSED;
2291 rtx r ATTRIBUTE_UNUSED;
2294 mode = TYPE_MODE (type);
2295 /* Promote integer types. */
2296 if (INTEGRAL_TYPE_P (type))
2297 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2298 return LIBCALL_VALUE(mode);
2301 /* Determine the amount of memory needed to store the possible return
2302 registers of an untyped call. */
2304 arm_apply_result_size (void)
2306 int size = 16;
2308 if (TARGET_ARM)
2310 if (TARGET_HARD_FLOAT_ABI)
2312 if (TARGET_FPA)
2313 size += 12;
2314 if (TARGET_MAVERICK)
2315 size += 8;
2317 if (TARGET_IWMMXT_ABI)
2318 size += 8;
2321 return size;
2324 /* Decide whether a type should be returned in memory (true)
2325 or in a register (false). This is called by the macro
2326 RETURN_IN_MEMORY. */
2328 arm_return_in_memory (tree type)
2330 HOST_WIDE_INT size;
2332 if (!AGGREGATE_TYPE_P (type) &&
2333 !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2334 /* All simple types are returned in registers.
2335 For AAPCS, complex types are treated the same as aggregates. */
2336 return 0;
2338 size = int_size_in_bytes (type);
2340 if (arm_abi != ARM_ABI_APCS)
2342 /* ATPCS and later return aggregate types in memory only if they are
2343 larger than a word (or are variable size). */
2344 return (size < 0 || size > UNITS_PER_WORD);
2347 /* For the arm-wince targets we choose to be compatible with Microsoft's
2348 ARM and Thumb compilers, which always return aggregates in memory. */
2349 #ifndef ARM_WINCE
2350 /* All structures/unions bigger than one word are returned in memory.
2351 Also catch the case where int_size_in_bytes returns -1. In this case
2352 the aggregate is either huge or of variable size, and in either case
2353 we will want to return it via memory and not in a register. */
2354 if (size < 0 || size > UNITS_PER_WORD)
2355 return 1;
2357 if (TREE_CODE (type) == RECORD_TYPE)
2359 tree field;
2361 /* For a struct the APCS says that we only return in a register
2362 if the type is 'integer like' and every addressable element
2363 has an offset of zero. For practical purposes this means
2364 that the structure can have at most one non bit-field element
2365 and that this element must be the first one in the structure. */
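      /* As a rough illustration of the APCS rules checked below (sizes
	 assume 32-bit int and float): struct { int i; } is returned in a
	 register; struct { short a; short b; } goes in memory because the
	 second member is not a bit-field; struct { float f; } goes in
	 memory because floats are not 'integer like'.  */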
2367 /* Find the first field, ignoring non FIELD_DECL things which will
2368 have been created by C++. */
2369 for (field = TYPE_FIELDS (type);
2370 field && TREE_CODE (field) != FIELD_DECL;
2371 field = TREE_CHAIN (field))
2372 continue;
2374 if (field == NULL)
2375 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2377 /* Check that the first field is valid for returning in a register. */
2379       /* ... Floats are not allowed.  */
2380 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2381 return 1;
2383 /* ... Aggregates that are not themselves valid for returning in
2384 a register are not allowed. */
2385 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2386 return 1;
2388 /* Now check the remaining fields, if any. Only bitfields are allowed,
2389 since they are not addressable. */
2390 for (field = TREE_CHAIN (field);
2391 field;
2392 field = TREE_CHAIN (field))
2394 if (TREE_CODE (field) != FIELD_DECL)
2395 continue;
2397 if (!DECL_BIT_FIELD_TYPE (field))
2398 return 1;
2401 return 0;
2404 if (TREE_CODE (type) == UNION_TYPE)
2406 tree field;
2408 /* Unions can be returned in registers if every element is
2409 integral, or can be returned in an integer register. */
2410 for (field = TYPE_FIELDS (type);
2411 field;
2412 field = TREE_CHAIN (field))
2414 if (TREE_CODE (field) != FIELD_DECL)
2415 continue;
2417 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2418 return 1;
2420 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2421 return 1;
2424 return 0;
2426 #endif /* not ARM_WINCE */
2428 /* Return all other types in memory. */
2429 return 1;
2432 /* Indicate whether or not words of a double are in big-endian order. */
2435 arm_float_words_big_endian (void)
2437 if (TARGET_MAVERICK)
2438 return 0;
2440   /* For FPA, float words are always big-endian.  For VFP, float words
2441 follow the memory system mode. */
2443 if (TARGET_FPA)
2445 return 1;
2448 if (TARGET_VFP)
2449 return (TARGET_BIG_END ? 1 : 0);
2451 return 1;
2454 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2455 for a call to a function whose data type is FNTYPE.
2456 For a library call, FNTYPE is NULL. */
2457 void
2458 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2459 rtx libname ATTRIBUTE_UNUSED,
2460 tree fndecl ATTRIBUTE_UNUSED)
2462 /* On the ARM, the offset starts at 0. */
2463 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2464 pcum->iwmmxt_nregs = 0;
2465 pcum->can_split = true;
2467 pcum->call_cookie = CALL_NORMAL;
2469 if (TARGET_LONG_CALLS)
2470 pcum->call_cookie = CALL_LONG;
2472 /* Check for long call/short call attributes. The attributes
2473 override any command line option. */
2474 if (fntype)
2476 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2477 pcum->call_cookie = CALL_SHORT;
2478 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2479 pcum->call_cookie = CALL_LONG;
2482 /* Varargs vectors are treated the same as long long.
2483      named_count avoids having to change the way arm handles 'named'.  */
2484 pcum->named_count = 0;
2485 pcum->nargs = 0;
2487 if (TARGET_REALLY_IWMMXT && fntype)
2489 tree fn_arg;
2491 for (fn_arg = TYPE_ARG_TYPES (fntype);
2492 fn_arg;
2493 fn_arg = TREE_CHAIN (fn_arg))
2494 pcum->named_count += 1;
2496 if (! pcum->named_count)
2497 pcum->named_count = INT_MAX;
2502 /* Return true if mode/type need doubleword alignment. */
2503 bool
2504 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2506 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2507 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2511 /* Determine where to put an argument to a function.
2512 Value is zero to push the argument on the stack,
2513 or a hard register in which to store the argument.
2515 MODE is the argument's machine mode.
2516 TYPE is the data type of the argument (as a tree).
2517 This is null for libcalls where that information may
2518 not be available.
2519 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2520 the preceding args and about the function being called.
2521 NAMED is nonzero if this argument is a named parameter
2522 (otherwise it is an extra parameter matching an ellipsis). */
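/* A hedged example of the core-register convention implemented below: for
   f (int a, double d), the int goes in r0; with ARM_DOUBLEWORD_ALIGN the
   doubleword-aligned double cannot start in the odd register r1, so
   pcum->nregs is bumped and d is passed in the even pair r2/r3.  */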
2525 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2526 tree type, int named)
2528 int nregs;
2530 /* Varargs vectors are treated the same as long long.
2531      named_count avoids having to change the way arm handles 'named'.  */
2532 if (TARGET_IWMMXT_ABI
2533 && arm_vector_mode_supported_p (mode)
2534 && pcum->named_count > pcum->nargs + 1)
2536 if (pcum->iwmmxt_nregs <= 9)
2537 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2538 else
2540 pcum->can_split = false;
2541 return NULL_RTX;
2545 /* Put doubleword aligned quantities in even register pairs. */
2546 if (pcum->nregs & 1
2547 && ARM_DOUBLEWORD_ALIGN
2548 && arm_needs_doubleword_align (mode, type))
2549 pcum->nregs++;
2551 if (mode == VOIDmode)
2552 /* Compute operand 2 of the call insn. */
2553 return GEN_INT (pcum->call_cookie);
2555 /* Only allow splitting an arg between regs and memory if all preceding
2556 args were allocated to regs. For args passed by reference we only count
2557 the reference pointer. */
2558 if (pcum->can_split)
2559 nregs = 1;
2560 else
2561 nregs = ARM_NUM_REGS2 (mode, type);
2563 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2564 return NULL_RTX;
2566 return gen_rtx_REG (mode, pcum->nregs);
2569 static int
2570 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2571 tree type, bool named ATTRIBUTE_UNUSED)
2573 int nregs = pcum->nregs;
2575 if (arm_vector_mode_supported_p (mode))
2576 return 0;
2578 if (NUM_ARG_REGS > nregs
2579 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2580 && pcum->can_split)
2581 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2583 return 0;
2586 /* Variable sized types are passed by reference. This is a GCC
2587 extension to the ARM ABI. */
2589 static bool
2590 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2591 enum machine_mode mode ATTRIBUTE_UNUSED,
2592 tree type, bool named ATTRIBUTE_UNUSED)
2594 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2597 /* Encode the current state of the #pragma [no_]long_calls. */
2598 typedef enum
2600   OFF,          /* No #pragma [no_]long_calls is in effect.  */
2601 LONG, /* #pragma long_calls is in effect. */
2602 SHORT /* #pragma no_long_calls is in effect. */
2603 } arm_pragma_enum;
2605 static arm_pragma_enum arm_pragma_long_calls = OFF;
2607 void
2608 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2610 arm_pragma_long_calls = LONG;
2613 void
2614 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2616 arm_pragma_long_calls = SHORT;
2619 void
2620 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2622 arm_pragma_long_calls = OFF;
2625 /* Table of machine attributes. */
2626 const struct attribute_spec arm_attribute_table[] =
2628 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2629 /* Function calls made to this symbol must be done indirectly, because
2630 it may lie outside of the 26 bit addressing range of a normal function
2631 call. */
2632 { "long_call", 0, 0, false, true, true, NULL },
2633 /* Whereas these functions are always known to reside within the 26 bit
2634 addressing range. */
2635 { "short_call", 0, 0, false, true, true, NULL },
2636 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2637 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2638 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2639 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2640 #ifdef ARM_PE
2641 /* ARM/PE has three new attributes:
2642 interfacearm - ?
2643 dllexport - for exporting a function/variable that will live in a dll
2644 dllimport - for importing a function/variable from a dll
2646 Microsoft allows multiple declspecs in one __declspec, separating
2647 them with spaces. We do NOT support this. Instead, use __declspec
2648 multiple times.
2650 { "dllimport", 0, 0, true, false, false, NULL },
2651 { "dllexport", 0, 0, true, false, false, NULL },
2652 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2653 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2654 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2655 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2656 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2657 #endif
2658 { NULL, 0, 0, false, false, false, NULL }
2661 /* Handle an attribute requiring a FUNCTION_DECL;
2662 arguments as in struct attribute_spec.handler. */
2663 static tree
2664 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2665 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2667 if (TREE_CODE (*node) != FUNCTION_DECL)
2669 warning ("%qs attribute only applies to functions",
2670 IDENTIFIER_POINTER (name));
2671 *no_add_attrs = true;
2674 return NULL_TREE;
2677 /* Handle an "interrupt" or "isr" attribute;
2678 arguments as in struct attribute_spec.handler. */
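/* For instance, a declaration such as
     void f (void) __attribute__ ((interrupt ("IRQ")));
   arrives here with ARGS holding the string "IRQ", which arm_isr_value
   maps onto one of the ARM_FT_* function-type values checked below.  */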
2679 static tree
2680 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2681 bool *no_add_attrs)
2683 if (DECL_P (*node))
2685 if (TREE_CODE (*node) != FUNCTION_DECL)
2687 warning ("%qs attribute only applies to functions",
2688 IDENTIFIER_POINTER (name));
2689 *no_add_attrs = true;
2691       /* FIXME: the argument, if any, is checked for type attributes;
2692 should it be checked for decl ones? */
2694 else
2696 if (TREE_CODE (*node) == FUNCTION_TYPE
2697 || TREE_CODE (*node) == METHOD_TYPE)
2699 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2701 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2702 *no_add_attrs = true;
2705 else if (TREE_CODE (*node) == POINTER_TYPE
2706 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2707 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2708 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2710 *node = build_variant_type_copy (*node);
2711 TREE_TYPE (*node) = build_type_attribute_variant
2712 (TREE_TYPE (*node),
2713 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2714 *no_add_attrs = true;
2716 else
2718 /* Possibly pass this attribute on from the type to a decl. */
2719 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2720 | (int) ATTR_FLAG_FUNCTION_NEXT
2721 | (int) ATTR_FLAG_ARRAY_NEXT))
2723 *no_add_attrs = true;
2724 return tree_cons (name, args, NULL_TREE);
2726 else
2728 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
2733 return NULL_TREE;
2736 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2737 /* Handle the "notshared" attribute. This attribute is another way of
2738 requesting hidden visibility. ARM's compiler supports
2739 "__declspec(notshared)"; we support the same thing via an
2740 attribute. */
2742 static tree
2743 arm_handle_notshared_attribute (tree *node,
2744 tree name ATTRIBUTE_UNUSED,
2745 tree args ATTRIBUTE_UNUSED,
2746 int flags ATTRIBUTE_UNUSED,
2747 bool *no_add_attrs)
2749 tree decl = TYPE_NAME (*node);
2751 if (decl)
2753 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
2754 DECL_VISIBILITY_SPECIFIED (decl) = 1;
2755 *no_add_attrs = false;
2757 return NULL_TREE;
2759 #endif
2761 /* Return 0 if the attributes for two types are incompatible, 1 if they
2762 are compatible, and 2 if they are nearly compatible (which causes a
2763 warning to be generated). */
2764 static int
2765 arm_comp_type_attributes (tree type1, tree type2)
2767 int l1, l2, s1, s2;
2769 /* Check for mismatch of non-default calling convention. */
2770 if (TREE_CODE (type1) != FUNCTION_TYPE)
2771 return 1;
2773 /* Check for mismatched call attributes. */
2774 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2775 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2776 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2777 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2779 /* Only bother to check if an attribute is defined. */
2780 if (l1 | l2 | s1 | s2)
2782 /* If one type has an attribute, the other must have the same attribute. */
2783 if ((l1 != l2) || (s1 != s2))
2784 return 0;
2786 /* Disallow mixed attributes. */
2787 if ((l1 & s2) || (l2 & s1))
2788 return 0;
2791 /* Check for mismatched ISR attribute. */
2792 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2793 if (! l1)
2794 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2795 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2796 if (! l2)
2797     l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2798 if (l1 != l2)
2799 return 0;
2801 return 1;
2804 /* Encode long_call or short_call attribute by prefixing
2805 symbol name in DECL with a special character FLAG. */
2806 void
2807 arm_encode_call_attribute (tree decl, int flag)
2809 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2810 int len = strlen (str);
2811 char * newstr;
2813 /* Do not allow weak functions to be treated as short call. */
2814 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2815 return;
2817 newstr = alloca (len + 2);
2818 newstr[0] = flag;
2819 strcpy (newstr + 1, str);
2821 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2822 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2825 /*  Assigns default attributes to a newly defined type.  This is used to
2826 set short_call/long_call attributes for function types of
2827 functions defined inside corresponding #pragma scopes. */
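/* For example (an illustrative sketch only):

     #pragma long_calls
     void far_func (void);    -- type gets the "long_call" attribute
     #pragma no_long_calls
     void near_func (void);   -- type gets the "short_call" attribute
     #pragma long_calls_off
     void plain_func (void);  -- no attribute is added here
*/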
2828 static void
2829 arm_set_default_type_attributes (tree type)
2831   /* Add __attribute__ ((long_call)) to all functions when inside
2832      #pragma long_calls, or __attribute__ ((short_call)) when inside
2833      #pragma no_long_calls.  */
2834 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2836 tree type_attr_list, attr_name;
2837 type_attr_list = TYPE_ATTRIBUTES (type);
2839 if (arm_pragma_long_calls == LONG)
2840 attr_name = get_identifier ("long_call");
2841 else if (arm_pragma_long_calls == SHORT)
2842 attr_name = get_identifier ("short_call");
2843 else
2844 return;
2846 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2847 TYPE_ATTRIBUTES (type) = type_attr_list;
2851 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2852 defined within the current compilation unit. If this cannot be
2853 determined, then 0 is returned. */
2854 static int
2855 current_file_function_operand (rtx sym_ref)
2857 /* This is a bit of a fib. A function will have a short call flag
2858 applied to its name if it has the short call attribute, or it has
2859 already been defined within the current compilation unit. */
2860 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2861 return 1;
2863 /* The current function is always defined within the current compilation
2864      unit.  If it is a weak definition, however, then this may not be the real
2865 definition of the function, and so we have to say no. */
2866 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2867 && !DECL_WEAK (current_function_decl))
2868 return 1;
2870 /* We cannot make the determination - default to returning 0. */
2871 return 0;
2874 /* Return nonzero if a 32 bit "long_call" should be generated for
2875 this call. We generate a long_call if the function:
2877         a.  has an __attribute__ ((long_call))
2878 or b. is within the scope of a #pragma long_calls
2879 or c. the -mlong-calls command line switch has been specified
2880 . and either:
2881 1. -ffunction-sections is in effect
2882 or 2. the current function has __attribute__ ((section))
2883 or 3. the target function has __attribute__ ((section))
2885 However we do not generate a long call if the function:
2887 d. has an __attribute__ ((short_call))
2888 or e. is inside the scope of a #pragma no_long_calls
2889 or f. is defined within the current compilation unit.
2891 This function will be called by C fragments contained in the machine
2892 description file. SYM_REF and CALL_COOKIE correspond to the matched
2893 rtl operands. CALL_SYMBOL is used to distinguish between
2894 two different callers of the function. It is set to 1 in the
2895 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2896 and "call_value" patterns. This is because of the difference in the
2897 SYM_REFs passed by these patterns. */
2899 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2901 if (!call_symbol)
2903 if (GET_CODE (sym_ref) != MEM)
2904 return 0;
2906 sym_ref = XEXP (sym_ref, 0);
2909 if (GET_CODE (sym_ref) != SYMBOL_REF)
2910 return 0;
2912 if (call_cookie & CALL_SHORT)
2913 return 0;
2915 if (TARGET_LONG_CALLS)
2917 if (flag_function_sections
2918 || DECL_SECTION_NAME (current_function_decl))
2919 /* c.3 is handled by the definition of the
2920 ARM_DECLARE_FUNCTION_SIZE macro. */
2921 return 1;
2924 if (current_file_function_operand (sym_ref))
2925 return 0;
2927 return (call_cookie & CALL_LONG)
2928 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2929 || TARGET_LONG_CALLS;
2932 /* Return nonzero if it is ok to make a tail-call to DECL. */
2933 static bool
2934 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2936 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2938 if (cfun->machine->sibcall_blocked)
2939 return false;
2941 /* Never tailcall something for which we have no decl, or if we
2942 are in Thumb mode. */
2943 if (decl == NULL || TARGET_THUMB)
2944 return false;
2946 /* Get the calling method. */
2947 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2948 call_type = CALL_SHORT;
2949 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2950 call_type = CALL_LONG;
2952 /* Cannot tail-call to long calls, since these are out of range of
2953 a branch instruction. However, if not compiling PIC, we know
2954 we can reach the symbol if it is in this compilation unit. */
2955 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2956 return false;
2958 /* If we are interworking and the function is not declared static
2959 then we can't tail-call it unless we know that it exists in this
2960 compilation unit (since it might be a Thumb routine). */
2961 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2962 return false;
2964 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2965 if (IS_INTERRUPT (arm_current_func_type ()))
2966 return false;
2968 /* Everything else is ok. */
2969 return true;
2973 /* Addressing mode support functions. */
2975 /* Return nonzero if X is a legitimate immediate operand when compiling
2976 for PIC. */
2978 legitimate_pic_operand_p (rtx x)
2980 if (CONSTANT_P (x)
2981 && flag_pic
2982 && (GET_CODE (x) == SYMBOL_REF
2983 || (GET_CODE (x) == CONST
2984 && GET_CODE (XEXP (x, 0)) == PLUS
2985 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2986 return 0;
2988 return 1;
2992 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2994 if (GET_CODE (orig) == SYMBOL_REF
2995 || GET_CODE (orig) == LABEL_REF)
2997 #ifndef AOF_ASSEMBLER
2998 rtx pic_ref, address;
2999 #endif
3000 rtx insn;
3001 int subregs = 0;
3003 if (reg == 0)
3005 if (no_new_pseudos)
3006 abort ();
3007 else
3008 reg = gen_reg_rtx (Pmode);
3010 subregs = 1;
3013 #ifdef AOF_ASSEMBLER
3014 /* The AOF assembler can generate relocations for these directly, and
3015 understands that the PIC register has to be added into the offset. */
3016 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3017 #else
3018 if (subregs)
3019 address = gen_reg_rtx (Pmode);
3020 else
3021 address = reg;
3023 if (TARGET_ARM)
3024 emit_insn (gen_pic_load_addr_arm (address, orig));
3025 else
3026 emit_insn (gen_pic_load_addr_thumb (address, orig));
3028 if ((GET_CODE (orig) == LABEL_REF
3029 || (GET_CODE (orig) == SYMBOL_REF &&
3030 SYMBOL_REF_LOCAL_P (orig)))
3031 && NEED_GOT_RELOC)
3032 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
3033 else
3035 pic_ref = gen_const_mem (Pmode,
3036 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
3037 address));
3040 insn = emit_move_insn (reg, pic_ref);
3041 #endif
3042 current_function_uses_pic_offset_table = 1;
3043 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3044 by loop. */
3045 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3046 REG_NOTES (insn));
3047 return reg;
3049 else if (GET_CODE (orig) == CONST)
3051 rtx base, offset;
3053 if (GET_CODE (XEXP (orig, 0)) == PLUS
3054 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3055 return orig;
3057 if (reg == 0)
3059 if (no_new_pseudos)
3060 abort ();
3061 else
3062 reg = gen_reg_rtx (Pmode);
3065 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3067 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3068 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3069 base == reg ? 0 : reg);
3071 else
3072 abort ();
3074 if (GET_CODE (offset) == CONST_INT)
3076           /* The base register doesn't really matter; we only want to
3077 test the index for the appropriate mode. */
3078 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3080 if (!no_new_pseudos)
3081 offset = force_reg (Pmode, offset);
3082 else
3083 abort ();
3086 if (GET_CODE (offset) == CONST_INT)
3087 return plus_constant (base, INTVAL (offset));
3090 if (GET_MODE_SIZE (mode) > 4
3091 && (GET_MODE_CLASS (mode) == MODE_INT
3092 || TARGET_SOFT_FLOAT))
3094 emit_insn (gen_addsi3 (reg, base, offset));
3095 return reg;
3098 return gen_rtx_PLUS (Pmode, base, offset);
3101 return orig;
3105 /* Find a spare low register. */
3107 static int
3108 thumb_find_work_register (int live_regs_mask)
3110 int reg;
3112 /* Use a spare arg register. */
3113 if (!regs_ever_live[LAST_ARG_REGNUM])
3114 return LAST_ARG_REGNUM;
3116 /* Look for a pushed register. This is used before the frame pointer is
3117      set up, so r7 is a candidate.  */
3118   for (reg = LAST_LO_REGNUM; reg >= 0; reg--)
3119 if (live_regs_mask & (1 << reg))
3120 return reg;
3122 /* Something went wrong. */
3123 abort ();
3127 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3128 low register. */
3130 void
3131 arm_load_pic_register (unsigned int scratch)
3133 #ifndef AOF_ASSEMBLER
3134 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3135 rtx global_offset_table;
3137 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3138 return;
3140 if (!flag_pic)
3141 abort ();
3143 l1 = gen_label_rtx ();
3145 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3146 /* On the ARM the PC register contains 'dot + 8' at the time of the
3147      addition; on the Thumb it is 'dot + 4'.  */
3148 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3149 if (GOT_PCREL)
3150 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3151 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3152 else
3153 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3155 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3157 if (TARGET_ARM)
3159 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3160 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3162 else
3164 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3166           /* We will have pushed the pic register, so we should always be
3167 able to find a work register. */
3168 pic_tmp = gen_rtx_REG (SImode, scratch);
3169 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3170 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3172 else
3173 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3174 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3177 /* Need to emit this whether or not we obey regdecls,
3178 since setjmp/longjmp can cause life info to screw up. */
3179 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3180 #endif /* AOF_ASSEMBLER */
3184 /* Return nonzero if X is valid as an ARM state addressing register. */
3185 static int
3186 arm_address_register_rtx_p (rtx x, int strict_p)
3188 int regno;
3190 if (GET_CODE (x) != REG)
3191 return 0;
3193 regno = REGNO (x);
3195 if (strict_p)
3196 return ARM_REGNO_OK_FOR_BASE_P (regno);
3198 return (regno <= LAST_ARM_REGNUM
3199 || regno >= FIRST_PSEUDO_REGISTER
3200 || regno == FRAME_POINTER_REGNUM
3201 || regno == ARG_POINTER_REGNUM);
3204 /* Return nonzero if X is a valid ARM state address operand. */
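/* In assembler terms, the forms accepted below include plain register
   addresses such as [r0], offset and (for suitable modes) shifted-index
   addresses such as [r0, #4] and [r0, r1, lsl #2], and pre/post modified
   forms such as [r0], #4 -- subject to the per-mode restrictions checked
   in arm_legitimate_index_p.  */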
3206 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3207 int strict_p)
3209 bool use_ldrd;
3210 enum rtx_code code = GET_CODE (x);
3212 if (arm_address_register_rtx_p (x, strict_p))
3213 return 1;
3215 use_ldrd = (TARGET_LDRD
3216 && (mode == DImode
3217 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3219 if (code == POST_INC || code == PRE_DEC
3220 || ((code == PRE_INC || code == POST_DEC)
3221 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3222 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3224 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3225 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3226 && GET_CODE (XEXP (x, 1)) == PLUS
3227 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3229 rtx addend = XEXP (XEXP (x, 1), 1);
3231       /* Don't allow ldrd post-increment by register because it's hard
3232          to fix up invalid register choices.  */
3233 if (use_ldrd
3234 && GET_CODE (x) == POST_MODIFY
3235 && GET_CODE (addend) == REG)
3236 return 0;
3238 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3239 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3242   /* After reload, constants split into minipools will have addresses
3243 from a LABEL_REF. */
3244 else if (reload_completed
3245 && (code == LABEL_REF
3246 || (code == CONST
3247 && GET_CODE (XEXP (x, 0)) == PLUS
3248 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3249 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3250 return 1;
3252 else if (mode == TImode)
3253 return 0;
3255 else if (code == PLUS)
3257 rtx xop0 = XEXP (x, 0);
3258 rtx xop1 = XEXP (x, 1);
3260 return ((arm_address_register_rtx_p (xop0, strict_p)
3261 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3262 || (arm_address_register_rtx_p (xop1, strict_p)
3263 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3266 #if 0
3267   /* Reload currently can't handle MINUS, so disable this for now.  */
3268 else if (GET_CODE (x) == MINUS)
3270 rtx xop0 = XEXP (x, 0);
3271 rtx xop1 = XEXP (x, 1);
3273 return (arm_address_register_rtx_p (xop0, strict_p)
3274 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3276 #endif
3278 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3279 && code == SYMBOL_REF
3280 && CONSTANT_POOL_ADDRESS_P (x)
3281 && ! (flag_pic
3282 && symbol_mentioned_p (get_pool_constant (x))))
3283 return 1;
3285 return 0;
3288 /* Return nonzero if INDEX is valid for an address index operand in
3289 ARM state. */
3290 static int
3291 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3292 int strict_p)
3294 HOST_WIDE_INT range;
3295 enum rtx_code code = GET_CODE (index);
3297 /* Standard coprocessor addressing modes. */
3298 if (TARGET_HARD_FLOAT
3299 && (TARGET_FPA || TARGET_MAVERICK)
3300 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3301 || (TARGET_MAVERICK && mode == DImode)))
3302 return (code == CONST_INT && INTVAL (index) < 1024
3303 && INTVAL (index) > -1024
3304 && (INTVAL (index) & 3) == 0);
3306 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3307 return (code == CONST_INT
3308 && INTVAL (index) < 1024
3309 && INTVAL (index) > -1024
3310 && (INTVAL (index) & 3) == 0);
3312 if (arm_address_register_rtx_p (index, strict_p)
3313 && (GET_MODE_SIZE (mode) <= 4))
3314 return 1;
3316 if (mode == DImode || mode == DFmode)
3318 if (code == CONST_INT)
3320 HOST_WIDE_INT val = INTVAL (index);
3322 if (TARGET_LDRD)
3323 return val > -256 && val < 256;
3324 else
3325 return val > -4096 && val < 4092;
3328 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3331 if (GET_MODE_SIZE (mode) <= 4
3332 && ! (arm_arch4
3333 && (mode == HImode
3334 || (mode == QImode && outer == SIGN_EXTEND))))
3336 if (code == MULT)
3338 rtx xiop0 = XEXP (index, 0);
3339 rtx xiop1 = XEXP (index, 1);
3341 return ((arm_address_register_rtx_p (xiop0, strict_p)
3342 && power_of_two_operand (xiop1, SImode))
3343 || (arm_address_register_rtx_p (xiop1, strict_p)
3344 && power_of_two_operand (xiop0, SImode)));
3346 else if (code == LSHIFTRT || code == ASHIFTRT
3347 || code == ASHIFT || code == ROTATERT)
3349 rtx op = XEXP (index, 1);
3351 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3352 && GET_CODE (op) == CONST_INT
3353 && INTVAL (op) > 0
3354 && INTVAL (op) <= 31);
3358 /* For ARM v4 we may be doing a sign-extend operation during the
3359 load. */
3360 if (arm_arch4)
3362 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3363 range = 256;
3364 else
3365 range = 4096;
3367 else
3368 range = (mode == HImode) ? 4095 : 4096;
3370 return (code == CONST_INT
3371 && INTVAL (index) < range
3372 && INTVAL (index) > -range);
3375 /* Return nonzero if X is valid as a Thumb state base register. */
3376 static int
3377 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3379 int regno;
3381 if (GET_CODE (x) != REG)
3382 return 0;
3384 regno = REGNO (x);
3386 if (strict_p)
3387 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3389 return (regno <= LAST_LO_REGNUM
3390 || regno > LAST_VIRTUAL_REGISTER
3391 || regno == FRAME_POINTER_REGNUM
3392 || (GET_MODE_SIZE (mode) >= 4
3393 && (regno == STACK_POINTER_REGNUM
3394 || regno >= FIRST_PSEUDO_REGISTER
3395 || x == hard_frame_pointer_rtx
3396 || x == arg_pointer_rtx)));
3399 /* Return nonzero if x is a legitimate index register. This is the case
3400 for any base register that can access a QImode object. */
3401 inline static int
3402 thumb_index_register_rtx_p (rtx x, int strict_p)
3404 return thumb_base_register_rtx_p (x, QImode, strict_p);
3407 /* Return nonzero if x is a legitimate Thumb-state address.
3409 The AP may be eliminated to either the SP or the FP, so we use the
3410 least common denominator, e.g. SImode, and offsets from 0 to 64.
3412 ??? Verify whether the above is the right approach.
3414 ??? Also, the FP may be eliminated to the SP, so perhaps that
3415 needs special handling also.
3417 ??? Look at how the mips16 port solves this problem. It probably uses
3418 better ways to solve some of these problems.
3420 Although it is not incorrect, we don't accept QImode and HImode
3421 addresses based on the frame pointer or arg pointer until the
3422 reload pass starts. This is so that eliminating such addresses
3423 into stack based ones won't produce impossible code. */
3425 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3427 /* ??? Not clear if this is right. Experiment. */
3428 if (GET_MODE_SIZE (mode) < 4
3429 && !(reload_in_progress || reload_completed)
3430 && (reg_mentioned_p (frame_pointer_rtx, x)
3431 || reg_mentioned_p (arg_pointer_rtx, x)
3432 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3433 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3434 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3435 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3436 return 0;
3438 /* Accept any base register. SP only in SImode or larger. */
3439 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3440 return 1;
3442 /* This is PC relative data before arm_reorg runs. */
3443 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3444 && GET_CODE (x) == SYMBOL_REF
3445 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3446 return 1;
3448 /* This is PC relative data after arm_reorg runs. */
3449 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3450 && (GET_CODE (x) == LABEL_REF
3451 || (GET_CODE (x) == CONST
3452 && GET_CODE (XEXP (x, 0)) == PLUS
3453 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3454 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3455 return 1;
3457 /* Post-inc indexing only supported for SImode and larger. */
3458 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3459 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3460 return 1;
3462 else if (GET_CODE (x) == PLUS)
3464 /* REG+REG address can be any two index registers. */
3465 /* We disallow FRAME+REG addressing since we know that FRAME
3466 will be replaced with STACK, and SP relative addressing only
3467 permits SP+OFFSET. */
3468 if (GET_MODE_SIZE (mode) <= 4
3469 && XEXP (x, 0) != frame_pointer_rtx
3470 && XEXP (x, 1) != frame_pointer_rtx
3471 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3472 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3473 return 1;
3475 /* REG+const has 5-7 bit offset for non-SP registers. */
3476 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3477 || XEXP (x, 0) == arg_pointer_rtx)
3478 && GET_CODE (XEXP (x, 1)) == CONST_INT
3479 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3480 return 1;
3482 /* REG+const has 10 bit offset for SP, but only SImode and
3483          larger are supported.  */
3484 /* ??? Should probably check for DI/DFmode overflow here
3485 just like GO_IF_LEGITIMATE_OFFSET does. */
3486 else if (GET_CODE (XEXP (x, 0)) == REG
3487 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3488 && GET_MODE_SIZE (mode) >= 4
3489 && GET_CODE (XEXP (x, 1)) == CONST_INT
3490 && INTVAL (XEXP (x, 1)) >= 0
3491 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3492 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3493 return 1;
3495 else if (GET_CODE (XEXP (x, 0)) == REG
3496 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3497 && GET_MODE_SIZE (mode) >= 4
3498 && GET_CODE (XEXP (x, 1)) == CONST_INT
3499 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3500 return 1;
3503 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3504 && GET_MODE_SIZE (mode) == 4
3505 && GET_CODE (x) == SYMBOL_REF
3506 && CONSTANT_POOL_ADDRESS_P (x)
3507 && !(flag_pic
3508 && symbol_mentioned_p (get_pool_constant (x))))
3509 return 1;
3511 return 0;
3514 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3515 instruction of mode MODE. */
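/* Concretely: byte accesses allow offsets 0..31, halfword accesses allow
   even offsets up to 62, and SImode or larger accesses allow multiples of
   4 with VAL plus the mode size no greater than 128 -- e.g. 0..124 for a
   word load.  */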
3517 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3519 switch (GET_MODE_SIZE (mode))
3521 case 1:
3522 return val >= 0 && val < 32;
3524 case 2:
3525 return val >= 0 && val < 64 && (val & 1) == 0;
3527 default:
3528 return (val >= 0
3529 && (val + GET_MODE_SIZE (mode)) <= 128
3530 && (val & 3) == 0);
3534 /* Try machine-dependent ways of modifying an illegitimate address
3535 to be legitimate. If we find one, return the new, valid address. */
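/* A sketch of the PLUS handling below: for an SImode access to
   (base + 0x1234), the out-of-range constant is split so that
   base + 0x1000 is computed into a fresh register and the remaining
   offset 0x234 stays in the address, which ldr/str can then encode
   directly.  */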
3537 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3539 if (GET_CODE (x) == PLUS)
3541 rtx xop0 = XEXP (x, 0);
3542 rtx xop1 = XEXP (x, 1);
3544 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3545 xop0 = force_reg (SImode, xop0);
3547 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3548 xop1 = force_reg (SImode, xop1);
3550 if (ARM_BASE_REGISTER_RTX_P (xop0)
3551 && GET_CODE (xop1) == CONST_INT)
3553 HOST_WIDE_INT n, low_n;
3554 rtx base_reg, val;
3555 n = INTVAL (xop1);
3557 /* VFP addressing modes actually allow greater offsets, but for
3558 now we just stick with the lowest common denominator. */
3559 if (mode == DImode
3560 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3562 low_n = n & 0x0f;
3563 n &= ~0x0f;
3564 if (low_n > 4)
3566 n += 16;
3567 low_n -= 16;
3570 else
3572 low_n = ((mode) == TImode ? 0
3573 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3574 n -= low_n;
3577 base_reg = gen_reg_rtx (SImode);
3578 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3579 GEN_INT (n)), NULL_RTX);
3580 emit_move_insn (base_reg, val);
3581 x = (low_n == 0 ? base_reg
3582 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3584 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3585 x = gen_rtx_PLUS (SImode, xop0, xop1);
3588 /* XXX We don't allow MINUS any more -- see comment in
3589 arm_legitimate_address_p (). */
3590 else if (GET_CODE (x) == MINUS)
3592 rtx xop0 = XEXP (x, 0);
3593 rtx xop1 = XEXP (x, 1);
3595 if (CONSTANT_P (xop0))
3596 xop0 = force_reg (SImode, xop0);
3598 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3599 xop1 = force_reg (SImode, xop1);
3601 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3602 x = gen_rtx_MINUS (SImode, xop0, xop1);
3605 if (flag_pic)
3607 /* We need to find and carefully transform any SYMBOL and LABEL
3608 references; so go back to the original address expression. */
3609 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3611 if (new_x != orig_x)
3612 x = new_x;
3615 return x;
3619 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3620 to be legitimate. If we find one, return the new, valid address. */
3622 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3624 if (GET_CODE (x) == PLUS
3625 && GET_CODE (XEXP (x, 1)) == CONST_INT
3626 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3627 || INTVAL (XEXP (x, 1)) < 0))
3629 rtx xop0 = XEXP (x, 0);
3630 rtx xop1 = XEXP (x, 1);
3631 HOST_WIDE_INT offset = INTVAL (xop1);
3633 /* Try and fold the offset into a biasing of the base register and
3634 then offsetting that. Don't do this when optimizing for space
3635 since it can cause too many CSEs. */
3636 if (optimize_size && offset >= 0
3637 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3639 HOST_WIDE_INT delta;
3641 if (offset >= 256)
3642 delta = offset - (256 - GET_MODE_SIZE (mode));
3643 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3644 delta = 31 * GET_MODE_SIZE (mode);
3645 else
3646 delta = offset & (~31 * GET_MODE_SIZE (mode));
3648 xop0 = force_operand (plus_constant (xop0, offset - delta),
3649 NULL_RTX);
3650 x = plus_constant (xop0, delta);
3652 else if (offset < 0 && offset > -256)
3653 /* Small negative offsets are best done with a subtract before the
3654        dereference; forcing these into a register normally takes two
3655 instructions. */
3656 x = force_operand (x, NULL_RTX);
3657 else
3659 /* For the remaining cases, force the constant into a register. */
3660 xop1 = force_reg (SImode, xop1);
3661 x = gen_rtx_PLUS (SImode, xop0, xop1);
3664 else if (GET_CODE (x) == PLUS
3665 && s_register_operand (XEXP (x, 1), SImode)
3666 && !s_register_operand (XEXP (x, 0), SImode))
3668 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3670 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3673 if (flag_pic)
3675 /* We need to find and carefully transform any SYMBOL and LABEL
3676 references; so go back to the original address expression. */
3677 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3679 if (new_x != orig_x)
3680 x = new_x;
3683 return x;
3688 #define REG_OR_SUBREG_REG(X) \
3689 (GET_CODE (X) == REG \
3690 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3692 #define REG_OR_SUBREG_RTX(X) \
3693 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3695 #ifndef COSTS_N_INSNS
3696 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3697 #endif
3698 static inline int
3699 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3701 enum machine_mode mode = GET_MODE (x);
3703 switch (code)
3705 case ASHIFT:
3706 case ASHIFTRT:
3707 case LSHIFTRT:
3708 case ROTATERT:
3709 case PLUS:
3710 case MINUS:
3711 case COMPARE:
3712 case NEG:
3713 case NOT:
3714 return COSTS_N_INSNS (1);
3716 case MULT:
3717 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3719 int cycles = 0;
3720 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3722 while (i)
3724 i >>= 2;
3725 cycles++;
3727 return COSTS_N_INSNS (2) + cycles;
3729 return COSTS_N_INSNS (1) + 16;
3731 case SET:
3732 return (COSTS_N_INSNS (1)
3733 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3734 + GET_CODE (SET_DEST (x)) == MEM));
3736 case CONST_INT:
3737 if (outer == SET)
3739 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3740 return 0;
3741 if (thumb_shiftable_const (INTVAL (x)))
3742 return COSTS_N_INSNS (2);
3743 return COSTS_N_INSNS (3);
3745 else if ((outer == PLUS || outer == COMPARE)
3746 && INTVAL (x) < 256 && INTVAL (x) > -256)
3747 return 0;
3748 else if (outer == AND
3749 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3750 return COSTS_N_INSNS (1);
3751 else if (outer == ASHIFT || outer == ASHIFTRT
3752 || outer == LSHIFTRT)
3753 return 0;
3754 return COSTS_N_INSNS (2);
3756 case CONST:
3757 case CONST_DOUBLE:
3758 case LABEL_REF:
3759 case SYMBOL_REF:
3760 return COSTS_N_INSNS (3);
3762 case UDIV:
3763 case UMOD:
3764 case DIV:
3765 case MOD:
3766 return 100;
3768 case TRUNCATE:
3769 return 99;
3771 case AND:
3772 case XOR:
3773 case IOR:
3774 /* XXX guess. */
3775 return 8;
3777 case MEM:
3778 /* XXX another guess. */
3779 /* Memory costs quite a lot for the first word, but subsequent words
3780 load at the equivalent of a single insn each. */
3781 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3782 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3783 ? 4 : 0));
3785 case IF_THEN_ELSE:
3786 /* XXX a guess. */
3787 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3788 return 14;
3789 return 2;
3791 case ZERO_EXTEND:
3792 /* XXX still guessing. */
3793 switch (GET_MODE (XEXP (x, 0)))
3795 case QImode:
3796 return (1 + (mode == DImode ? 4 : 0)
3797 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3799 case HImode:
3800 return (4 + (mode == DImode ? 4 : 0)
3801 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3803 case SImode:
3804 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3806 default:
3807 return 99;
3810 default:
3811 return 99;
3816 /* Worker routine for arm_rtx_costs. */
3817 static inline int
3818 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3820 enum machine_mode mode = GET_MODE (x);
3821 enum rtx_code subcode;
3822 int extra_cost;
3824 switch (code)
3826 case MEM:
3827 /* Memory costs quite a lot for the first word, but subsequent words
3828 load at the equivalent of a single insn each. */
3829 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3830 + (GET_CODE (x) == SYMBOL_REF
3831 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3833 case DIV:
3834 case MOD:
3835 case UDIV:
3836 case UMOD:
3837 return optimize_size ? COSTS_N_INSNS (2) : 100;
3839 case ROTATE:
3840 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3841 return 4;
3842 /* Fall through */
3843 case ROTATERT:
3844 if (mode != SImode)
3845 return 8;
3846 /* Fall through */
3847 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3848 if (mode == DImode)
3849 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3850 + ((GET_CODE (XEXP (x, 0)) == REG
3851 || (GET_CODE (XEXP (x, 0)) == SUBREG
3852 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3853 ? 0 : 8));
3854 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3855 || (GET_CODE (XEXP (x, 0)) == SUBREG
3856 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3857 ? 0 : 4)
3858 + ((GET_CODE (XEXP (x, 1)) == REG
3859 || (GET_CODE (XEXP (x, 1)) == SUBREG
3860 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3861 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3862 ? 0 : 4));
3864 case MINUS:
3865 if (mode == DImode)
3866 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3867 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3868 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3869 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3870 ? 0 : 8));
3872 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3873 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3874 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3875 && arm_const_double_rtx (XEXP (x, 1))))
3876 ? 0 : 8)
3877 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3878 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3879 && arm_const_double_rtx (XEXP (x, 0))))
3880 ? 0 : 8));
3882 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3883 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3884 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3885 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3886 || subcode == ASHIFTRT || subcode == LSHIFTRT
3887 || subcode == ROTATE || subcode == ROTATERT
3888 || (subcode == MULT
3889 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3890 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3891 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3892 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3893 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3894 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3895 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3896 return 1;
3897 /* Fall through */
3899 case PLUS:
3900 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3902 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3903 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3904 && arm_const_double_rtx (XEXP (x, 1))))
3905 ? 0 : 8));
3907 /* Fall through */
3908 case AND: case XOR: case IOR:
3909 extra_cost = 0;
3911 /* Normally the frame registers will be split into reg+const during
3912 reload, so it is a bad idea to combine them with other instructions,
3913 since then they might not be moved outside of loops. As a compromise
3914 we allow integration with ops that have a constant as their second
3915 operand. */
3916 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3917 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3918 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3919 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3920 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3921 extra_cost = 4;
3923 if (mode == DImode)
3924 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3925 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3926 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3927 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3928 ? 0 : 8));
3930 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3931 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3932 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3933 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3934 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3935 ? 0 : 4));
3937 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3938 return (1 + extra_cost
3939 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3940 || subcode == LSHIFTRT || subcode == ASHIFTRT
3941 || subcode == ROTATE || subcode == ROTATERT
3942 || (subcode == MULT
3943 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3944 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3945 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3946 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3947 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3948 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3949 ? 0 : 4));
3951 return 8;
3953 case MULT:
3954 /* This should have been handled by the CPU specific routines. */
3955 abort ();
3957 case TRUNCATE:
3958 if (arm_arch3m && mode == SImode
3959 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3960 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3961 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3962 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3963 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3964 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3965 return 8;
3966 return 99;
3968 case NEG:
3969 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3970 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3971 /* Fall through */
3972 case NOT:
3973 if (mode == DImode)
3974 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3976 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3978 case IF_THEN_ELSE:
3979 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3980 return 14;
3981 return 2;
3983 case COMPARE:
3984 return 1;
3986 case ABS:
3987 return 4 + (mode == DImode ? 4 : 0);
3989 case SIGN_EXTEND:
3990 if (GET_MODE (XEXP (x, 0)) == QImode)
3991 return (4 + (mode == DImode ? 4 : 0)
3992 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3993 /* Fall through */
3994 case ZERO_EXTEND:
3995 switch (GET_MODE (XEXP (x, 0)))
3997 case QImode:
3998 return (1 + (mode == DImode ? 4 : 0)
3999 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4001 case HImode:
4002 return (4 + (mode == DImode ? 4 : 0)
4003 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4005 case SImode:
4006 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4008 case V8QImode:
4009 case V4HImode:
4010 case V2SImode:
4011 case V4QImode:
4012 case V2HImode:
4013 return 1;
4015 default:
4016 break;
4018 abort ();
4020 case CONST_INT:
4021 if (const_ok_for_arm (INTVAL (x)))
4022 return outer == SET ? 2 : -1;
4023 else if (outer == AND
4024 && const_ok_for_arm (~INTVAL (x)))
4025 return -1;
4026 else if ((outer == COMPARE
4027 || outer == PLUS || outer == MINUS)
4028 && const_ok_for_arm (-INTVAL (x)))
4029 return -1;
4030 else
4031 return 5;
4033 case CONST:
4034 case LABEL_REF:
4035 case SYMBOL_REF:
4036 return 6;
4038 case CONST_DOUBLE:
4039 if (arm_const_double_rtx (x))
4040 return outer == SET ? 2 : -1;
4041 else if ((outer == COMPARE || outer == PLUS)
4042 && neg_const_double_rtx_ok_for_fpa (x))
4043 return -1;
4044 return 7;
4046 default:
4047 return 99;
4051 /* RTX costs when optimizing for size. */
4052 static bool
4053 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4055 enum machine_mode mode = GET_MODE (x);
4057 if (TARGET_THUMB)
4059 /* XXX TBD. For now, use the standard costs. */
4060 *total = thumb_rtx_costs (x, code, outer_code);
4061 return true;
4064 switch (code)
4066 case MEM:
4067 /* A memory access costs 1 insn if the mode is small, or the address is
4068 a single register; otherwise it costs one insn per word. */
4069 if (REG_P (XEXP (x, 0)))
4070 *total = COSTS_N_INSNS (1);
4071 else
4072 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4073 return true;
4075 case DIV:
4076 case MOD:
4077 case UDIV:
4078 case UMOD:
4079 /* Needs a libcall, so it costs about this. */
4080 *total = COSTS_N_INSNS (2);
4081 return false;
4083 case ROTATE:
4084 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4086 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
4087 return true;
4089 /* Fall through */
4090 case ROTATERT:
4091 case ASHIFT:
4092 case LSHIFTRT:
4093 case ASHIFTRT:
4094 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4096 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4097 return true;
4099 else if (mode == SImode)
4101 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4102 /* Slightly disparage register shifts, but not by much. */
4103 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4104 *total += 1 + rtx_cost (XEXP (x, 1), code);
4105 return true;
4108 /* Needs a libcall. */
4109 *total = COSTS_N_INSNS (2);
4110 return false;
4112 case MINUS:
4113 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4115 *total = COSTS_N_INSNS (1);
4116 return false;
4119 if (mode == SImode)
4121 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4122 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4124 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4125 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4126 || subcode1 == ROTATE || subcode1 == ROTATERT
4127 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4128 || subcode1 == ASHIFTRT)
4130 /* It's just the cost of the two operands. */
4131 *total = 0;
4132 return false;
4135 *total = COSTS_N_INSNS (1);
4136 return false;
4139 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4140 return false;
4142 case PLUS:
4143 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4145 *total = COSTS_N_INSNS (1);
4146 return false;
4149 /* Fall through */
4150 case AND: case XOR: case IOR:
4151 if (mode == SImode)
4153 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4155 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4156 || subcode == LSHIFTRT || subcode == ASHIFTRT
4157 || (code == AND && subcode == NOT))
4159 /* It's just the cost of the two operands. */
4160 *total = 0;
4161 return false;
4165 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4166 return false;
4168 case MULT:
4169 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4170 return false;
4172 case NEG:
4173 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4174 *total = COSTS_N_INSNS (1);
4175 /* Fall through */
4176 case NOT:
4177 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4179 return false;
4181 case IF_THEN_ELSE:
4182 *total = 0;
4183 return false;
4185 case COMPARE:
4186 if (cc_register (XEXP (x, 0), VOIDmode))
4187 *total = 0;
4188 else
4189 *total = COSTS_N_INSNS (1);
4190 return false;
4192 case ABS:
4193 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4194 *total = COSTS_N_INSNS (1);
4195 else
4196 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4197 return false;
4199 case SIGN_EXTEND:
4200 *total = 0;
4201 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4203 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4204 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4206 if (mode == DImode)
4207 *total += COSTS_N_INSNS (1);
4208 return false;
4210 case ZERO_EXTEND:
4211 *total = 0;
4212 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4214 switch (GET_MODE (XEXP (x, 0)))
4216 case QImode:
4217 *total += COSTS_N_INSNS (1);
4218 break;
4220 case HImode:
4221 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4223 case SImode:
4224 break;
4226 default:
4227 *total += COSTS_N_INSNS (2);
4231 if (mode == DImode)
4232 *total += COSTS_N_INSNS (1);
4234 return false;
4236 case CONST_INT:
4237 if (const_ok_for_arm (INTVAL (x)))
4238 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4239 else if (const_ok_for_arm (~INTVAL (x)))
4240 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4241 else if (const_ok_for_arm (-INTVAL (x)))
4243 if (outer_code == COMPARE || outer_code == PLUS
4244 || outer_code == MINUS)
4245 *total = 0;
4246 else
4247 *total = COSTS_N_INSNS (1);
4249 else
4250 *total = COSTS_N_INSNS (2);
4251 return true;
4253 case CONST:
4254 case LABEL_REF:
4255 case SYMBOL_REF:
4256 *total = COSTS_N_INSNS (2);
4257 return true;
4259 case CONST_DOUBLE:
4260 *total = COSTS_N_INSNS (4);
4261 return true;
4263 default:
4264 if (mode != VOIDmode)
4265 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4266 else
4267 *total = COSTS_N_INSNS (4); /* Who knows? */
4268 return false;
4272 /* RTX costs for cores with a slow MUL implementation. */
4274 static bool
4275 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4277 enum machine_mode mode = GET_MODE (x);
4279 if (TARGET_THUMB)
4281 *total = thumb_rtx_costs (x, code, outer_code);
4282 return true;
4285 switch (code)
4287 case MULT:
4288 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4289 || mode == DImode)
4291 *total = 30;
4292 return true;
4295 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4297 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4298 & (unsigned HOST_WIDE_INT) 0xffffffff);
4299 int cost, const_ok = const_ok_for_arm (i);
4300 int j, booth_unit_size;
4302 /* Tune as appropriate. */
4303 cost = const_ok ? 4 : 8;
4304 booth_unit_size = 2;
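/* Worked example: a multiplier of 10 (binary 1010) clears after two
   2-bit shifts in the loop below, so this loadable constant costs
   4 + 2 * 2 = 8.  */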
4305 for (j = 0; i && j < 32; j += booth_unit_size)
4307 i >>= booth_unit_size;
4308 cost += 2;
4311 *total = cost;
4312 return true;
4315 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4316 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4317 return true;
4319 default:
4320 *total = arm_rtx_costs_1 (x, code, outer_code);
4321 return true;
4326 /* RTX cost for cores with a fast multiply unit (M variants). */
4328 static bool
4329 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4331 enum machine_mode mode = GET_MODE (x);
4333 if (TARGET_THUMB)
4335 *total = thumb_rtx_costs (x, code, outer_code);
4336 return true;
4339 switch (code)
4341 case MULT:
4342 /* There is no point basing this on the tuning, since it is always the
4343 fast variant if it exists at all. */
4344 if (mode == DImode
4345 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4346 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4347 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4349 *total = 8;
4350 return true;
4354 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4355 || mode == DImode)
4357 *total = 30;
4358 return true;
4361 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4363 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4364 & (unsigned HOST_WIDE_INT) 0xffffffff);
4365 int cost, const_ok = const_ok_for_arm (i);
4366 int j, booth_unit_size;
4368 /* Tune as appropriate. */
4369 cost = const_ok ? 4 : 8;
4370 booth_unit_size = 8;
4371 for (j = 0; i && j < 32; j += booth_unit_size)
4373 i >>= booth_unit_size;
4374 cost += 2;
4377 *total = cost;
4378 return true;
4381 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4382 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4383 return true;
4385 default:
4386 *total = arm_rtx_costs_1 (x, code, outer_code);
4387 return true;
4392 /* RTX cost for XScale CPUs. */
4394 static bool
4395 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4397 enum machine_mode mode = GET_MODE (x);
4399 if (TARGET_THUMB)
4401 *total = thumb_rtx_costs (x, code, outer_code);
4402 return true;
4405 switch (code)
4407 case MULT:
4408 /* There is no point basing this on the tuning, since it is always the
4409 fast variant if it exists at all. */
4410 if (mode == DImode
4411 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4412 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4413 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4415 *total = 8;
4416 return true;
4420 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4421 || mode == DImode)
4423 *total = 30;
4424 return true;
4427 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4429 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4430 & (unsigned HOST_WIDE_INT) 0xffffffff);
4431 int cost, const_ok = const_ok_for_arm (i);
4432 unsigned HOST_WIDE_INT masked_const;
4434 /* The cost will be related to two insns.
4435 First a load of the constant (MOV or LDR), then a multiply. */
4436 cost = 2;
4437 if (! const_ok)
4438 cost += 1; /* LDR is probably more expensive because
4439 of longer result latency. */
4440 masked_const = i & 0xffff8000;
4441 if (masked_const != 0 && masked_const != 0xffff8000)
4443 masked_const = i & 0xf8000000;
4444 if (masked_const == 0 || masked_const == 0xf8000000)
4445 cost += 1;
4446 else
4447 cost += 2;
4449 *total = cost;
4450 return true;
4453 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4454 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4455 return true;
4457 case COMPARE:
4458 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4459 will stall until the multiplication is complete. */
4460 if (GET_CODE (XEXP (x, 0)) == MULT)
4461 *total = 4 + rtx_cost (XEXP (x, 0), code);
4462 else
4463 *total = arm_rtx_costs_1 (x, code, outer_code);
4464 return true;
4466 default:
4467 *total = arm_rtx_costs_1 (x, code, outer_code);
4468 return true;
4473 /* RTX costs for 9e (and later) cores. */
4475 static bool
4476 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4478 enum machine_mode mode = GET_MODE (x);
4479 int nonreg_cost;
4480 int cost;
4482 if (TARGET_THUMB)
4484 switch (code)
4486 case MULT:
4487 *total = COSTS_N_INSNS (3);
4488 return true;
4490 default:
4491 *total = thumb_rtx_costs (x, code, outer_code);
4492 return true;
4496 switch (code)
4498 case MULT:
4499 /* There is no point basing this on the tuning, since it is always the
4500 fast variant if it exists at all. */
4501 if (mode == DImode
4502 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4503 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4504 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4506 *total = 3;
4507 return true;
4511 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4513 *total = 30;
4514 return true;
4516 if (mode == DImode)
4518 cost = 7;
4519 nonreg_cost = 8;
4521 else
4523 cost = 2;
4524 nonreg_cost = 4;
4528 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4529 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4530 return true;
4532 default:
4533 *total = arm_rtx_costs_1 (x, code, outer_code);
4534 return true;
4537 /* All address computations that can be done are free, but rtx cost returns
4538 the same for practically all of them. So we weight the different types
4539 of address here in the order (most preferred first):
4540 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4541 static inline int
4542 arm_arm_address_cost (rtx x)
4544 enum rtx_code c = GET_CODE (x);
4546 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4547 return 0;
4548 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4549 return 10;
4551 if (c == PLUS || c == MINUS)
4553 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4554 return 2;
4556 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4557 return 3;
4559 return 4;
4562 return 6;
4565 static inline int
4566 arm_thumb_address_cost (rtx x)
4568 enum rtx_code c = GET_CODE (x);
4570 if (c == REG)
4571 return 1;
4572 if (c == PLUS
4573 && GET_CODE (XEXP (x, 0)) == REG
4574 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4575 return 1;
4577 return 2;
4580 static int
4581 arm_address_cost (rtx x)
4583 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4586 static int
4587 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4589 rtx i_pat, d_pat;
4591 /* Some true dependencies can have a higher cost depending
4592 on precisely how certain input operands are used. */
4593 if (arm_tune_xscale
4594 && REG_NOTE_KIND (link) == 0
4595 && recog_memoized (insn) >= 0
4596 && recog_memoized (dep) >= 0)
4598 int shift_opnum = get_attr_shift (insn);
4599 enum attr_type attr_type = get_attr_type (dep);
4601 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4602 operand for INSN. If we have a shifted input operand and the
4603 instruction we depend on is another ALU instruction, then we may
4604 have to account for an additional stall. */
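/* For example, if INSN is "add r0, r1, r2, lsl #2" and DEP is an ALU
   instruction that writes r2, the loop below returns a dependence
   cost of 2.  */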
4605 if (shift_opnum != 0
4606 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4608 rtx shifted_operand;
4609 int opno;
4611 /* Get the shifted operand. */
4612 extract_insn (insn);
4613 shifted_operand = recog_data.operand[shift_opnum];
4615 /* Iterate over all the operands in DEP. If we write an operand
4616 that overlaps with SHIFTED_OPERAND, then we have to increase the
4617 cost of this dependency. */
4618 extract_insn (dep);
4619 preprocess_constraints ();
4620 for (opno = 0; opno < recog_data.n_operands; opno++)
4622 /* We can ignore strict inputs. */
4623 if (recog_data.operand_type[opno] == OP_IN)
4624 continue;
4626 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4627 shifted_operand))
4628 return 2;
4633 /* XXX This is not strictly true for the FPA. */
4634 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4635 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4636 return 0;
4638 /* Call insns don't incur a stall, even if they follow a load. */
4639 if (REG_NOTE_KIND (link) == 0
4640 && GET_CODE (insn) == CALL_INSN)
4641 return 1;
4643 if ((i_pat = single_set (insn)) != NULL
4644 && GET_CODE (SET_SRC (i_pat)) == MEM
4645 && (d_pat = single_set (dep)) != NULL
4646 && GET_CODE (SET_DEST (d_pat)) == MEM)
4648 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4649 /* This is a load after a store; there is no conflict if the load reads
4650 from a cached area. Assume that loads from the stack and from the
4651 constant pool are cached, and that others will miss. This is a
4652 hack. */
4654 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4655 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4656 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4657 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4658 return 1;
4661 return cost;
4664 static int fp_consts_inited = 0;
4666 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4667 static const char * const strings_fp[8] =
4669 "0", "1", "2", "3",
4670 "4", "5", "0.5", "10"
4673 static REAL_VALUE_TYPE values_fp[8];
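/* With the table above 5.0 is a valid immediate for the FPA while
   6.0 is not; for VFP only +0.0 is accepted, since init_fp_table
   then initializes just the first entry.  */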
4675 static void
4676 init_fp_table (void)
4678 int i;
4679 REAL_VALUE_TYPE r;
4681 if (TARGET_VFP)
4682 fp_consts_inited = 1;
4683 else
4684 fp_consts_inited = 8;
4686 for (i = 0; i < fp_consts_inited; i++)
4688 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4689 values_fp[i] = r;
4693 /* Return TRUE if rtx X is a valid immediate FP constant. */
4695 arm_const_double_rtx (rtx x)
4697 REAL_VALUE_TYPE r;
4698 int i;
4700 if (!fp_consts_inited)
4701 init_fp_table ();
4703 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4704 if (REAL_VALUE_MINUS_ZERO (r))
4705 return 0;
4707 for (i = 0; i < fp_consts_inited; i++)
4708 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4709 return 1;
4711 return 0;
4714 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4716 neg_const_double_rtx_ok_for_fpa (rtx x)
4718 REAL_VALUE_TYPE r;
4719 int i;
4721 if (!fp_consts_inited)
4722 init_fp_table ();
4724 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4725 r = REAL_VALUE_NEGATE (r);
4726 if (REAL_VALUE_MINUS_ZERO (r))
4727 return 0;
4729 for (i = 0; i < 8; i++)
4730 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4731 return 1;
4733 return 0;
4736 /* Predicates for `match_operand' and `match_operator'. */
4738 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4740 cirrus_memory_offset (rtx op)
4742 /* Reject eliminable registers. */
4743 if (! (reload_in_progress || reload_completed)
4744 && ( reg_mentioned_p (frame_pointer_rtx, op)
4745 || reg_mentioned_p (arg_pointer_rtx, op)
4746 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4747 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4748 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4749 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4750 return 0;
4752 if (GET_CODE (op) == MEM)
4754 rtx ind;
4756 ind = XEXP (op, 0);
4758 /* Match: (mem (reg)). */
4759 if (GET_CODE (ind) == REG)
4760 return 1;
4762 /* Match:
4763 (mem (plus (reg)
4764 (const))). */
4765 if (GET_CODE (ind) == PLUS
4766 && GET_CODE (XEXP (ind, 0)) == REG
4767 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4768 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4769 return 1;
4772 return 0;
4775 /* Return TRUE if OP is a valid VFP memory address pattern.
4776 WB is true if writeback address modes are allowed. */
4779 arm_coproc_mem_operand (rtx op, bool wb)
4781 rtx ind;
4783 /* Reject eliminable registers. */
4784 if (! (reload_in_progress || reload_completed)
4785 && ( reg_mentioned_p (frame_pointer_rtx, op)
4786 || reg_mentioned_p (arg_pointer_rtx, op)
4787 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4788 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4789 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4790 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4791 return FALSE;
4793 /* Constants are converted into offsets from labels. */
4794 if (GET_CODE (op) != MEM)
4795 return FALSE;
4797 ind = XEXP (op, 0);
4799 if (reload_completed
4800 && (GET_CODE (ind) == LABEL_REF
4801 || (GET_CODE (ind) == CONST
4802 && GET_CODE (XEXP (ind, 0)) == PLUS
4803 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4804 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4805 return TRUE;
4807 /* Match: (mem (reg)). */
4808 if (GET_CODE (ind) == REG)
4809 return arm_address_register_rtx_p (ind, 0);
4811 /* Autoincrement addressing modes. */
4812 if (wb
4813 && (GET_CODE (ind) == PRE_INC
4814 || GET_CODE (ind) == POST_INC
4815 || GET_CODE (ind) == PRE_DEC
4816 || GET_CODE (ind) == POST_DEC))
4817 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4819 if (wb
4820 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4821 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4822 && GET_CODE (XEXP (ind, 1)) == PLUS
4823 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4824 ind = XEXP (ind, 1);
4826 /* Match:
4827 (plus (reg)
4828 (const)). */
4829 if (GET_CODE (ind) == PLUS
4830 && GET_CODE (XEXP (ind, 0)) == REG
4831 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4832 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4833 && INTVAL (XEXP (ind, 1)) > -1024
4834 && INTVAL (XEXP (ind, 1)) < 1024
4835 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4836 return TRUE;
4838 return FALSE;
4841 /* Return true if X is a register that will be eliminated later on. */
4843 arm_eliminable_register (rtx x)
4845 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
4846 || REGNO (x) == ARG_POINTER_REGNUM
4847 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
4848 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
4851 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4852 VFP registers. Otherwise return NO_REGS. */
4854 enum reg_class
4855 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4857 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4858 return NO_REGS;
4860 return GENERAL_REGS;
4864 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4865 Used by the Cirrus Maverick code, which has to work around
4866 a hardware bug triggered by such instructions. */
4867 static bool
4868 arm_memory_load_p (rtx insn)
4870 rtx body, lhs, rhs;
4872 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4873 return false;
4875 body = PATTERN (insn);
4877 if (GET_CODE (body) != SET)
4878 return false;
4880 lhs = XEXP (body, 0);
4881 rhs = XEXP (body, 1);
4883 lhs = REG_OR_SUBREG_RTX (lhs);
4885 /* If the destination is not a general purpose
4886 register we do not have to worry. */
4887 if (GET_CODE (lhs) != REG
4888 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4889 return false;
4891 /* As well as loads from memory we also have to react
4892 to loads of invalid constants which will be turned
4893 into loads from the minipool. */
4894 return (GET_CODE (rhs) == MEM
4895 || GET_CODE (rhs) == SYMBOL_REF
4896 || note_invalid_constants (insn, -1, false));
4899 /* Return TRUE if INSN is a Cirrus instruction. */
4900 static bool
4901 arm_cirrus_insn_p (rtx insn)
4903 enum attr_cirrus attr;
4905 /* get_attr aborts on USE and CLOBBER. */
4906 if (!insn
4907 || GET_CODE (insn) != INSN
4908 || GET_CODE (PATTERN (insn)) == USE
4909 || GET_CODE (PATTERN (insn)) == CLOBBER)
4910 return 0;
4912 attr = get_attr_cirrus (insn);
4914 return attr != CIRRUS_NOT;
4917 /* Cirrus reorg for invalid instruction combinations. */
4918 static void
4919 cirrus_reorg (rtx first)
4921 enum attr_cirrus attr;
4922 rtx body = PATTERN (first);
4923 rtx t;
4924 int nops;
4926 /* Any branch must be followed by 2 non Cirrus instructions. */
4927 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4929 nops = 0;
4930 t = next_nonnote_insn (first);
4932 if (arm_cirrus_insn_p (t))
4933 ++ nops;
4935 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4936 ++ nops;
4938 while (nops --)
4939 emit_insn_after (gen_nop (), first);
4941 return;
4944 /* (float (blah)) is in parallel with a clobber. */
4945 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4946 body = XVECEXP (body, 0, 0);
4948 if (GET_CODE (body) == SET)
4950 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4952 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4953 be followed by a non Cirrus insn. */
4954 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4956 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4957 emit_insn_after (gen_nop (), first);
4959 return;
4961 else if (arm_memory_load_p (first))
4963 unsigned int arm_regno;
4965 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4966 ldr/cfmv64hr combination where the Rd field is the same
4967 in both instructions must be split with a non Cirrus
4968 insn. Example:
4970 ldr r0, blah
4972 cfmvsr mvf0, r0. */
4974 /* Get Arm register number for ldr insn. */
4975 if (GET_CODE (lhs) == REG)
4976 arm_regno = REGNO (lhs);
4977 else if (GET_CODE (rhs) == REG)
4978 arm_regno = REGNO (rhs);
4979 else
4980 abort ();
4982 /* Next insn. */
4983 first = next_nonnote_insn (first);
4985 if (! arm_cirrus_insn_p (first))
4986 return;
4988 body = PATTERN (first);
4990 /* (float (blah)) is in parallel with a clobber. */
4991 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4992 body = XVECEXP (body, 0, 0);
4994 if (GET_CODE (body) == FLOAT)
4995 body = XEXP (body, 0);
4997 if (get_attr_cirrus (first) == CIRRUS_MOVE
4998 && GET_CODE (XEXP (body, 1)) == REG
4999 && arm_regno == REGNO (XEXP (body, 1)))
5000 emit_insn_after (gen_nop (), first);
5002 return;
5006 /* get_attr aborts on USE and CLOBBER. */
5007 if (!first
5008 || GET_CODE (first) != INSN
5009 || GET_CODE (PATTERN (first)) == USE
5010 || GET_CODE (PATTERN (first)) == CLOBBER)
5011 return;
5013 attr = get_attr_cirrus (first);
5015 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5016 must be followed by a non-coprocessor instruction. */
5017 if (attr == CIRRUS_COMPARE)
5019 nops = 0;
5021 t = next_nonnote_insn (first);
5023 if (arm_cirrus_insn_p (t))
5024 ++ nops;
5026 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5027 ++ nops;
5029 while (nops --)
5030 emit_insn_after (gen_nop (), first);
5032 return;
5036 /* Return TRUE if X references a SYMBOL_REF. */
5038 symbol_mentioned_p (rtx x)
5040 const char * fmt;
5041 int i;
5043 if (GET_CODE (x) == SYMBOL_REF)
5044 return 1;
5046 fmt = GET_RTX_FORMAT (GET_CODE (x));
5048 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5050 if (fmt[i] == 'E')
5052 int j;
5054 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5055 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5056 return 1;
5058 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5059 return 1;
5062 return 0;
5065 /* Return TRUE if X references a LABEL_REF. */
5067 label_mentioned_p (rtx x)
5069 const char * fmt;
5070 int i;
5072 if (GET_CODE (x) == LABEL_REF)
5073 return 1;
5075 fmt = GET_RTX_FORMAT (GET_CODE (x));
5076 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5078 if (fmt[i] == 'E')
5080 int j;
5082 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5083 if (label_mentioned_p (XVECEXP (x, i, j)))
5084 return 1;
5086 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5087 return 1;
5090 return 0;
5093 enum rtx_code
5094 minmax_code (rtx x)
5096 enum rtx_code code = GET_CODE (x);
5098 if (code == SMAX)
5099 return GE;
5100 else if (code == SMIN)
5101 return LE;
5102 else if (code == UMIN)
5103 return LEU;
5104 else if (code == UMAX)
5105 return GEU;
5107 abort ();
5110 /* Return 1 if memory locations are adjacent. */
5112 adjacent_mem_locations (rtx a, rtx b)
5114 if ((GET_CODE (XEXP (a, 0)) == REG
5115 || (GET_CODE (XEXP (a, 0)) == PLUS
5116 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5117 && (GET_CODE (XEXP (b, 0)) == REG
5118 || (GET_CODE (XEXP (b, 0)) == PLUS
5119 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5121 HOST_WIDE_INT val0 = 0, val1 = 0;
5122 rtx reg0, reg1;
5123 int val_diff;
5125 if (GET_CODE (XEXP (a, 0)) == PLUS)
5127 reg0 = XEXP (XEXP (a, 0), 0);
5128 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5130 else
5131 reg0 = XEXP (a, 0);
5133 if (GET_CODE (XEXP (b, 0)) == PLUS)
5135 reg1 = XEXP (XEXP (b, 0), 0);
5136 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5138 else
5139 reg1 = XEXP (b, 0);
5141 /* Don't accept any offset that will require multiple
5142 instructions to handle, since this would cause the
5143 arith_adjacentmem pattern to output an overlong sequence. */
5144 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5145 return 0;
5147 /* Don't allow an eliminable register: register elimination can make
5148 the offset too large. */
5149 if (arm_eliminable_register (reg0))
5150 return 0;
5152 val_diff = val1 - val0;
5153 return ((REGNO (reg0) == REGNO (reg1))
5154 && (val_diff == 4 || val_diff == -4));
5157 return 0;
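/* Classify a candidate load-multiple. The return value is 0 if the
   operands cannot be merged, 1 for ldmia, 2 for ldmib, 3 for ldmda,
   4 for ldmdb, or 5 for an add of the offset followed by ldmia (see
   emit_ldm_seq below). For example the pair
   ldr r0, [r4]
   ldr r1, [r4, #4]
   uses ascending registers at adjacent offsets starting at zero and
   is classified as 1, i.e. ldmia r4, {r0, r1}.  */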
5161 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5162 HOST_WIDE_INT *load_offset)
5164 int unsorted_regs[4];
5165 HOST_WIDE_INT unsorted_offsets[4];
5166 int order[4];
5167 int base_reg = -1;
5168 int i;
5170 /* Can only handle 2, 3, or 4 insns at present,
5171 though could be easily extended if required. */
5172 if (nops < 2 || nops > 4)
5173 abort ();
5175 /* Loop over the operands and check that the memory references are
5176 suitable (i.e. immediate offsets from the same base register). At
5177 the same time, extract the target register, and the memory
5178 offsets. */
5179 for (i = 0; i < nops; i++)
5181 rtx reg;
5182 rtx offset;
5184 /* Convert a subreg of a mem into the mem itself. */
5185 if (GET_CODE (operands[nops + i]) == SUBREG)
5186 operands[nops + i] = alter_subreg (operands + (nops + i));
5188 if (GET_CODE (operands[nops + i]) != MEM)
5189 abort ();
5191 /* Don't reorder volatile memory references; it doesn't seem worth
5192 looking for the case where the order is ok anyway. */
5193 if (MEM_VOLATILE_P (operands[nops + i]))
5194 return 0;
5196 offset = const0_rtx;
5198 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5199 || (GET_CODE (reg) == SUBREG
5200 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5201 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5202 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5203 == REG)
5204 || (GET_CODE (reg) == SUBREG
5205 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5206 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5207 == CONST_INT)))
5209 if (i == 0)
5211 base_reg = REGNO (reg);
5212 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5213 ? REGNO (operands[i])
5214 : REGNO (SUBREG_REG (operands[i])));
5215 order[0] = 0;
5217 else
5219 if (base_reg != (int) REGNO (reg))
5220 /* Not addressed from the same base register. */
5221 return 0;
5223 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5224 ? REGNO (operands[i])
5225 : REGNO (SUBREG_REG (operands[i])));
5226 if (unsorted_regs[i] < unsorted_regs[order[0]])
5227 order[0] = i;
5230 /* If it isn't an integer register, or if it overwrites the
5231 base register but isn't the last insn in the list, then
5232 we can't do this. */
5233 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5234 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5235 return 0;
5237 unsorted_offsets[i] = INTVAL (offset);
5239 else
5240 /* Not a suitable memory address. */
5241 return 0;
5244 /* All the useful information has now been extracted from the
5245 operands into unsorted_regs and unsorted_offsets; additionally,
5246 order[0] has been set to the lowest numbered register in the
5247 list. Sort the registers into order, and check that the memory
5248 offsets are ascending and adjacent. */
5250 for (i = 1; i < nops; i++)
5252 int j;
5254 order[i] = order[i - 1];
5255 for (j = 0; j < nops; j++)
5256 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5257 && (order[i] == order[i - 1]
5258 || unsorted_regs[j] < unsorted_regs[order[i]]))
5259 order[i] = j;
5261 /* Have we found a suitable register? If not, one must be used more
5262 than once. */
5263 if (order[i] == order[i - 1])
5264 return 0;
5266 /* Is the memory address adjacent and ascending? */
5267 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5268 return 0;
5271 if (base)
5273 *base = base_reg;
5275 for (i = 0; i < nops; i++)
5276 regs[i] = unsorted_regs[order[i]];
5278 *load_offset = unsorted_offsets[order[0]];
5281 if (unsorted_offsets[order[0]] == 0)
5282 return 1; /* ldmia */
5284 if (unsorted_offsets[order[0]] == 4)
5285 return 2; /* ldmib */
5287 if (unsorted_offsets[order[nops - 1]] == 0)
5288 return 3; /* ldmda */
5290 if (unsorted_offsets[order[nops - 1]] == -4)
5291 return 4; /* ldmdb */
5293 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5294 if the offset isn't small enough. The reason 2 ldrs are faster
5295 is because these ARMs are able to do more than one cache access
5296 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5297 whilst the ARM8 has a double bandwidth cache. This means that
5298 these cores can do both an instruction fetch and a data fetch in
5299 a single cycle, so the trick of calculating the address into a
5300 scratch register (one of the result regs) and then doing a load
5301 multiple actually becomes slower (and no smaller in code size).
5302 That is the transformation
5304 ldr rd1, [rbase + offset]
5305 ldr rd2, [rbase + offset + 4]
to
5309 add rd1, rbase, offset
5310 ldmia rd1, {rd1, rd2}
5312 produces worse code -- '3 cycles + any stalls on rd2' instead of
5313 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5314 access per cycle, the first sequence could never complete in less
5315 than 6 cycles, whereas the ldm sequence would only take 5 and
5316 would make better use of sequential accesses if not hitting the
5317 cache.
5319 We cheat here and test 'arm_ld_sched' which we currently know to
5320 only be true for the ARM8, ARM9 and StrongARM. If this ever
5321 changes, then the test below needs to be reworked. */
5322 if (nops == 2 && arm_ld_sched)
5323 return 0;
5325 /* Can't do it without setting up the offset, only do this if it takes
5326 no more than one insn. */
5327 return (const_ok_for_arm (unsorted_offsets[order[0]])
5328 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
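/* Emit the assembler for a load-multiple classified by
   load_multiple_sequence. As a rough sketch, with an empty
   REGISTER_PREFIX, case 1 with base r4 and registers r0 and r1
   builds the template
   ldm%?ia r4, {r0, r1} %@ phole ldm
   where %? expands to the condition suffix and %@ to the assembler
   comment character.  */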
5331 const char *
5332 emit_ldm_seq (rtx *operands, int nops)
5334 int regs[4];
5335 int base_reg;
5336 HOST_WIDE_INT offset;
5337 char buf[100];
5338 int i;
5340 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5342 case 1:
5343 strcpy (buf, "ldm%?ia\t");
5344 break;
5346 case 2:
5347 strcpy (buf, "ldm%?ib\t");
5348 break;
5350 case 3:
5351 strcpy (buf, "ldm%?da\t");
5352 break;
5354 case 4:
5355 strcpy (buf, "ldm%?db\t");
5356 break;
5358 case 5:
5359 if (offset >= 0)
5360 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5361 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5362 (long) offset);
5363 else
5364 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5365 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5366 (long) -offset);
5367 output_asm_insn (buf, operands);
5368 base_reg = regs[0];
5369 strcpy (buf, "ldm%?ia\t");
5370 break;
5372 default:
5373 abort ();
5376 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5377 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5379 for (i = 1; i < nops; i++)
5380 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5381 reg_names[regs[i]]);
5383 strcat (buf, "}\t%@ phole ldm");
5385 output_asm_insn (buf, operands);
5386 return "";
5390 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5391 HOST_WIDE_INT * load_offset)
5393 int unsorted_regs[4];
5394 HOST_WIDE_INT unsorted_offsets[4];
5395 int order[4];
5396 int base_reg = -1;
5397 int i;
5399 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5400 extended if required. */
5401 if (nops < 2 || nops > 4)
5402 abort ();
5404 /* Loop over the operands and check that the memory references are
5405 suitable (i.e. immediate offsets from the same base register). At
5406 the same time, extract the target register, and the memory
5407 offsets. */
5408 for (i = 0; i < nops; i++)
5410 rtx reg;
5411 rtx offset;
5413 /* Convert a subreg of a mem into the mem itself. */
5414 if (GET_CODE (operands[nops + i]) == SUBREG)
5415 operands[nops + i] = alter_subreg (operands + (nops + i));
5417 if (GET_CODE (operands[nops + i]) != MEM)
5418 abort ();
5420 /* Don't reorder volatile memory references; it doesn't seem worth
5421 looking for the case where the order is ok anyway. */
5422 if (MEM_VOLATILE_P (operands[nops + i]))
5423 return 0;
5425 offset = const0_rtx;
5427 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5428 || (GET_CODE (reg) == SUBREG
5429 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5430 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5431 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5432 == REG)
5433 || (GET_CODE (reg) == SUBREG
5434 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5435 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5436 == CONST_INT)))
5438 if (i == 0)
5440 base_reg = REGNO (reg);
5441 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5442 ? REGNO (operands[i])
5443 : REGNO (SUBREG_REG (operands[i])));
5444 order[0] = 0;
5446 else
5448 if (base_reg != (int) REGNO (reg))
5449 /* Not addressed from the same base register. */
5450 return 0;
5452 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5453 ? REGNO (operands[i])
5454 : REGNO (SUBREG_REG (operands[i])));
5455 if (unsorted_regs[i] < unsorted_regs[order[0]])
5456 order[0] = i;
5459 /* If it isn't an integer register, then we can't do this. */
5460 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5461 return 0;
5463 unsorted_offsets[i] = INTVAL (offset);
5465 else
5466 /* Not a suitable memory address. */
5467 return 0;
5470 /* All the useful information has now been extracted from the
5471 operands into unsorted_regs and unsorted_offsets; additionally,
5472 order[0] has been set to the lowest numbered register in the
5473 list. Sort the registers into order, and check that the memory
5474 offsets are ascending and adjacent. */
5476 for (i = 1; i < nops; i++)
5478 int j;
5480 order[i] = order[i - 1];
5481 for (j = 0; j < nops; j++)
5482 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5483 && (order[i] == order[i - 1]
5484 || unsorted_regs[j] < unsorted_regs[order[i]]))
5485 order[i] = j;
5487 /* Have we found a suitable register? If not, one must be used more
5488 than once. */
5489 if (order[i] == order[i - 1])
5490 return 0;
5492 /* Is the memory address adjacent and ascending? */
5493 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5494 return 0;
5497 if (base)
5499 *base = base_reg;
5501 for (i = 0; i < nops; i++)
5502 regs[i] = unsorted_regs[order[i]];
5504 *load_offset = unsorted_offsets[order[0]];
5507 if (unsorted_offsets[order[0]] == 0)
5508 return 1; /* stmia */
5510 if (unsorted_offsets[order[0]] == 4)
5511 return 2; /* stmib */
5513 if (unsorted_offsets[order[nops - 1]] == 0)
5514 return 3; /* stmda */
5516 if (unsorted_offsets[order[nops - 1]] == -4)
5517 return 4; /* stmdb */
5519 return 0;
5522 const char *
5523 emit_stm_seq (rtx *operands, int nops)
5525 int regs[4];
5526 int base_reg;
5527 HOST_WIDE_INT offset;
5528 char buf[100];
5529 int i;
5531 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5533 case 1:
5534 strcpy (buf, "stm%?ia\t");
5535 break;
5537 case 2:
5538 strcpy (buf, "stm%?ib\t");
5539 break;
5541 case 3:
5542 strcpy (buf, "stm%?da\t");
5543 break;
5545 case 4:
5546 strcpy (buf, "stm%?db\t");
5547 break;
5549 default:
5550 abort ();
5553 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5554 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5556 for (i = 1; i < nops; i++)
5557 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5558 reg_names[regs[i]]);
5560 strcat (buf, "}\t%@ phole stm");
5562 output_asm_insn (buf, operands);
5563 return "";
5567 /* Routines for use in generating RTL. */
5570 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5571 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5573 HOST_WIDE_INT offset = *offsetp;
5574 int i = 0, j;
5575 rtx result;
5576 int sign = up ? 1 : -1;
5577 rtx mem, addr;
5579 /* XScale has load-store double instructions, but they have stricter
5580 alignment requirements than load-store multiple, so we cannot
5581 use them.
5583 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5584 the pipeline until completion.
5586 NREGS CYCLES
1 3
2 4
3 5
4 6
5592 An ldr instruction takes 1-3 cycles, but does not block the
5593 pipeline.
5595 NREGS CYCLES
5596 1 1-3
5597 2 2-6
5598 3 3-9
5599 4 4-12
5601 Best case ldr will always win. However, the more ldr instructions
5602 we issue, the less likely we are to be able to schedule them well.
5603 Using ldr instructions also increases code size.
5605 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5606 for counts of 3 or 4 regs. */
5607 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5609 rtx seq;
5611 start_sequence ();
5613 for (i = 0; i < count; i++)
5615 addr = plus_constant (from, i * 4 * sign);
5616 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5617 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5618 offset += 4 * sign;
5621 if (write_back)
5623 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5624 *offsetp = offset;
5627 seq = get_insns ();
5628 end_sequence ();
5630 return seq;
5633 result = gen_rtx_PARALLEL (VOIDmode,
5634 rtvec_alloc (count + (write_back ? 1 : 0)));
5635 if (write_back)
5637 XVECEXP (result, 0, 0)
5638 = gen_rtx_SET (GET_MODE (from), from,
5639 plus_constant (from, count * 4 * sign));
5640 i = 1;
5641 count++;
5644 for (j = 0; i < count; i++, j++)
5646 addr = plus_constant (from, j * 4 * sign);
5647 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5648 XVECEXP (result, 0, i)
5649 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5650 offset += 4 * sign;
5653 if (write_back)
5654 *offsetp = offset;
5656 return result;
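/* As a rough sketch, arm_gen_load_multiple (0, 2, from, TRUE, FALSE,
   basemem, &offset) on non-XScale tuning builds the PARALLEL
   (parallel [(set (reg:SI 0) (mem:SI from))
   (set (reg:SI 1) (mem:SI (plus:SI from (const_int 4))))])
   which is later matched by the load-multiple patterns.  */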
5660 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5661 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5663 HOST_WIDE_INT offset = *offsetp;
5664 int i = 0, j;
5665 rtx result;
5666 int sign = up ? 1 : -1;
5667 rtx mem, addr;
5669 /* See arm_gen_load_multiple for discussion of
5670 the pros/cons of ldm/stm usage for XScale. */
5671 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5673 rtx seq;
5675 start_sequence ();
5677 for (i = 0; i < count; i++)
5679 addr = plus_constant (to, i * 4 * sign);
5680 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5681 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5682 offset += 4 * sign;
5685 if (write_back)
5687 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5688 *offsetp = offset;
5691 seq = get_insns ();
5692 end_sequence ();
5694 return seq;
5697 result = gen_rtx_PARALLEL (VOIDmode,
5698 rtvec_alloc (count + (write_back ? 1 : 0)));
5699 if (write_back)
5701 XVECEXP (result, 0, 0)
5702 = gen_rtx_SET (GET_MODE (to), to,
5703 plus_constant (to, count * 4 * sign));
5704 i = 1;
5705 count++;
5708 for (j = 0; i < count; i++, j++)
5710 addr = plus_constant (to, j * 4 * sign);
5711 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5712 XVECEXP (result, 0, i)
5713 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5714 offset += 4 * sign;
5717 if (write_back)
5718 *offsetp = offset;
5720 return result;
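/* Expand a constant-length, word-aligned block copy (movmemqi) of at
   most 64 bytes using load/store-multiple sequences, with any
   trailing 1-3 bytes handled by halfword and byte stores. Returns 0
   if the operands are not suitable.  */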
5724 arm_gen_movmemqi (rtx *operands)
5726 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5727 HOST_WIDE_INT srcoffset, dstoffset;
5728 int i;
5729 rtx src, dst, srcbase, dstbase;
5730 rtx part_bytes_reg = NULL;
5731 rtx mem;
5733 if (GET_CODE (operands[2]) != CONST_INT
5734 || GET_CODE (operands[3]) != CONST_INT
5735 || INTVAL (operands[2]) > 64
5736 || INTVAL (operands[3]) & 3)
5737 return 0;
5739 dstbase = operands[0];
5740 srcbase = operands[1];
5742 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5743 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5745 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5746 out_words_to_go = INTVAL (operands[2]) / 4;
5747 last_bytes = INTVAL (operands[2]) & 3;
5748 dstoffset = srcoffset = 0;
5750 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5751 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5753 for (i = 0; in_words_to_go >= 2; i+=4)
5755 if (in_words_to_go > 4)
5756 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5757 srcbase, &srcoffset));
5758 else
5759 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5760 FALSE, srcbase, &srcoffset));
5762 if (out_words_to_go)
5764 if (out_words_to_go > 4)
5765 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5766 dstbase, &dstoffset));
5767 else if (out_words_to_go != 1)
5768 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5769 dst, TRUE,
5770 (last_bytes == 0
5771 ? FALSE : TRUE),
5772 dstbase, &dstoffset));
5773 else
5775 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5776 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5777 if (last_bytes != 0)
5779 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5780 dstoffset += 4;
5785 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5786 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5789 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5790 if (out_words_to_go)
5792 rtx sreg;
5794 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5795 sreg = copy_to_reg (mem);
5797 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5798 emit_move_insn (mem, sreg);
5799 in_words_to_go--;
5801 if (in_words_to_go) /* Sanity check */
5802 abort ();
5805 if (in_words_to_go)
5807 if (in_words_to_go < 0)
5808 abort ();
5810 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5811 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5814 if (last_bytes && part_bytes_reg == NULL)
5815 abort ();
5817 if (BYTES_BIG_ENDIAN && last_bytes)
5819 rtx tmp = gen_reg_rtx (SImode);
5821 /* The bytes we want are in the top end of the word. */
5822 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5823 GEN_INT (8 * (4 - last_bytes))));
5824 part_bytes_reg = tmp;
5826 while (last_bytes)
5828 mem = adjust_automodify_address (dstbase, QImode,
5829 plus_constant (dst, last_bytes - 1),
5830 dstoffset + last_bytes - 1);
5831 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5833 if (--last_bytes)
5835 tmp = gen_reg_rtx (SImode);
5836 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5837 part_bytes_reg = tmp;
5842 else
5844 if (last_bytes > 1)
5846 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5847 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5848 last_bytes -= 2;
5849 if (last_bytes)
5851 rtx tmp = gen_reg_rtx (SImode);
5852 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5853 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5854 part_bytes_reg = tmp;
5855 dstoffset += 2;
5859 if (last_bytes)
5861 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5862 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5866 return 1;
5869 /* Generate a memory reference for a half word, such that it will be loaded
5870 into the top 16 bits of the word. We can assume that the address is
5871 known to be alignable and of the form reg, or plus (reg, const). */
5874 arm_gen_rotated_half_load (rtx memref)
5876 HOST_WIDE_INT offset = 0;
5877 rtx base = XEXP (memref, 0);
5879 if (GET_CODE (base) == PLUS)
5881 offset = INTVAL (XEXP (base, 1));
5882 base = XEXP (base, 0);
5885 /* If we aren't allowed to generate unaligned addresses, then fail. */
5886 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5887 return NULL;
5889 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5891 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5892 return base;
5894 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5897 /* Select a dominance comparison mode if possible for a test of the general
5898 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5899 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5900 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5901 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5902 In all cases OP will be either EQ or NE, but we don't need to know which
5903 here. If we are unable to support a dominance comparison we return
5904 CC mode. This will then fail to match for the RTL expressions that
5905 generate this call. */
5906 enum machine_mode
5907 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5909 enum rtx_code cond1, cond2;
5910 int swapped = 0;
5912 /* Currently we will probably get the wrong result if the individual
5913 comparisons are not simple. This also ensures that it is safe to
5914 reverse a comparison if necessary. */
5915 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5916 != CCmode)
5917 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5918 != CCmode))
5919 return CCmode;
5921 /* The if_then_else variant of this tests the second condition if the
5922 first passes, but is true if the first fails. Reverse the first
5923 condition to get a true "inclusive-or" expression. */
5924 if (cond_or == DOM_CC_NX_OR_Y)
5925 cond1 = reverse_condition (cond1);
5927 /* If the comparisons are not equal, and one doesn't dominate the other,
5928 then we can't do this. */
5929 if (cond1 != cond2
5930 && !comparison_dominates_p (cond1, cond2)
5931 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5932 return CCmode;
5934 if (swapped)
5936 enum rtx_code temp = cond1;
5937 cond1 = cond2;
5938 cond2 = temp;
5941 switch (cond1)
5943 case EQ:
5944 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5945 return CC_DEQmode;
5947 switch (cond2)
5949 case LE: return CC_DLEmode;
5950 case LEU: return CC_DLEUmode;
5951 case GE: return CC_DGEmode;
5952 case GEU: return CC_DGEUmode;
5953 default: break;
5956 break;
5958 case LT:
5959 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5960 return CC_DLTmode;
5961 if (cond2 == LE)
5962 return CC_DLEmode;
5963 if (cond2 == NE)
5964 return CC_DNEmode;
5965 break;
5967 case GT:
5968 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5969 return CC_DGTmode;
5970 if (cond2 == GE)
5971 return CC_DGEmode;
5972 if (cond2 == NE)
5973 return CC_DNEmode;
5974 break;
5976 case LTU:
5977 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5978 return CC_DLTUmode;
5979 if (cond2 == LEU)
5980 return CC_DLEUmode;
5981 if (cond2 == NE)
5982 return CC_DNEmode;
5983 break;
5985 case GTU:
5986 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5987 return CC_DGTUmode;
5988 if (cond2 == GEU)
5989 return CC_DGEUmode;
5990 if (cond2 == NE)
5991 return CC_DNEmode;
5992 break;
5994 /* The remaining cases only occur when both comparisons are the
5995 same. */
5996 case NE:
5997 return CC_DNEmode;
5999 case LE:
6000 return CC_DLEmode;
6002 case GE:
6003 return CC_DGEmode;
6005 case LEU:
6006 return CC_DLEUmode;
6008 case GEU:
6009 return CC_DGEUmode;
6011 default:
6012 break;
6015 abort ();
6018 enum machine_mode
6019 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6021 /* All floating point compares return CCFP if it is an equality
6022 comparison, and CCFPE otherwise. */
6023 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6025 switch (op)
6027 case EQ:
6028 case NE:
6029 case UNORDERED:
6030 case ORDERED:
6031 case UNLT:
6032 case UNLE:
6033 case UNGT:
6034 case UNGE:
6035 case UNEQ:
6036 case LTGT:
6037 return CCFPmode;
6039 case LT:
6040 case LE:
6041 case GT:
6042 case GE:
6043 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6044 return CCFPmode;
6045 return CCFPEmode;
6047 default:
6048 abort ();
6052 /* A compare with a shifted operand. Because of canonicalization, the
6053 comparison will have to be swapped when we emit the assembler. */
6054 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6055 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6056 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6057 || GET_CODE (x) == ROTATERT))
6058 return CC_SWPmode;
6060 /* This is a special case that is used by combine to allow a
6061 comparison of a shifted byte load to be split into a zero-extend
6062 followed by a comparison of the shifted integer (only valid for
6063 equalities and unsigned inequalities). */
6064 if (GET_MODE (x) == SImode
6065 && GET_CODE (x) == ASHIFT
6066 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6067 && GET_CODE (XEXP (x, 0)) == SUBREG
6068 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6069 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6070 && (op == EQ || op == NE
6071 || op == GEU || op == GTU || op == LTU || op == LEU)
6072 && GET_CODE (y) == CONST_INT)
6073 return CC_Zmode;
6075 /* A construct for a conditional compare: if the false arm contains
6076 0, then both conditions must be true, otherwise either condition
6077 must be true. Not all conditions are possible, so CCmode is
6078 returned if it can't be done. */
6079 if (GET_CODE (x) == IF_THEN_ELSE
6080 && (XEXP (x, 2) == const0_rtx
6081 || XEXP (x, 2) == const1_rtx)
6082 && COMPARISON_P (XEXP (x, 0))
6083 && COMPARISON_P (XEXP (x, 1)))
6084 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6085 INTVAL (XEXP (x, 2)));
6087 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6088 if (GET_CODE (x) == AND
6089 && COMPARISON_P (XEXP (x, 0))
6090 && COMPARISON_P (XEXP (x, 1)))
6091 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6092 DOM_CC_X_AND_Y);
6094 if (GET_CODE (x) == IOR
6095 && COMPARISON_P (XEXP (x, 0))
6096 && COMPARISON_P (XEXP (x, 1)))
6097 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6098 DOM_CC_X_OR_Y);
6100 /* An operation (on Thumb) where we want to test for a single bit.
6101 This is done by shifting that bit up into the top bit of a
6102 scratch register; we can then branch on the sign bit. */
6103 if (TARGET_THUMB
6104 && GET_MODE (x) == SImode
6105 && (op == EQ || op == NE)
6106 && (GET_CODE (x) == ZERO_EXTRACT))
6107 return CC_Nmode;
6109 /* For an operation that sets the condition codes as a side-effect, the
6110 V flag is not set correctly, so we can only use comparisons where
6111 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6112 instead.) */
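/* (Editorial example, not from the original sources: for code such as
   "if ((a & b) < 0)" the ANDS that sets the flags leaves V meaningless,
   so the test is emitted with the "mi" condition rather than "lt";
   hence only EQ, NE, LT and GE are accepted below.)  */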
6113 if (GET_MODE (x) == SImode
6114 && y == const0_rtx
6115 && (op == EQ || op == NE || op == LT || op == GE)
6116 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6117 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6118 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6119 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6120 || GET_CODE (x) == LSHIFTRT
6121 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6122 || GET_CODE (x) == ROTATERT
6123 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6124 return CC_NOOVmode;
6126 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6127 return CC_Zmode;
6129 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6130 && GET_CODE (x) == PLUS
6131 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6132 return CC_Cmode;
6134 return CCmode;
6137 /* X and Y are two things to compare using CODE. Emit the compare insn and
6138 return the rtx for the CC register in the proper mode. */
6141 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6143 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6144 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6146 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6147 gen_rtx_COMPARE (mode, x, y)));
6149 return cc_reg;
6152 /* Generate a sequence of insns that will generate the correct return
6153 address mask depending on the physical architecture that the program
6154 is running on. */
6156 arm_gen_return_addr_mask (void)
6158 rtx reg = gen_reg_rtx (Pmode);
6160 emit_insn (gen_return_addr_mask (reg));
6161 return reg;
6164 void
6165 arm_reload_in_hi (rtx *operands)
6167 rtx ref = operands[1];
6168 rtx base, scratch;
6169 HOST_WIDE_INT offset = 0;
6171 if (GET_CODE (ref) == SUBREG)
6173 offset = SUBREG_BYTE (ref);
6174 ref = SUBREG_REG (ref);
6177 if (GET_CODE (ref) == REG)
6179 /* We have a pseudo which has been spilt onto the stack; there
6180 are two cases here: the first where there is a simple
6181 stack-slot replacement and a second where the stack-slot is
6182 out of range, or is used as a subreg. */
6183 if (reg_equiv_mem[REGNO (ref)])
6185 ref = reg_equiv_mem[REGNO (ref)];
6186 base = find_replacement (&XEXP (ref, 0));
6188 else
6189 /* The slot is out of range, or was dressed up in a SUBREG. */
6190 base = reg_equiv_address[REGNO (ref)];
6192 else
6193 base = find_replacement (&XEXP (ref, 0));
6195 /* Handle the case where the address is too complex to be offset by 1. */
6196 if (GET_CODE (base) == MINUS
6197 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6199 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6201 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6202 base = base_plus;
6204 else if (GET_CODE (base) == PLUS)
6206 /* The addend must be CONST_INT, or we would have dealt with it above. */
6207 HOST_WIDE_INT hi, lo;
6209 offset += INTVAL (XEXP (base, 1));
6210 base = XEXP (base, 0);
6212 /* Rework the address into a legal sequence of insns. */
6213 /* Valid range for lo is -4095 -> 4095 */
6214 lo = (offset >= 0
6215 ? (offset & 0xfff)
6216 : -((-offset) & 0xfff));
6218 /* Corner case: if lo is the max offset then we would be out of range
6219 once we have added the additional 1 below, so bump the msb into the
6220 pre-loading insn(s). */
6221 if (lo == 4095)
6222 lo &= 0x7ff;
6224 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6225 ^ (HOST_WIDE_INT) 0x80000000)
6226 - (HOST_WIDE_INT) 0x80000000);
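/* (Editorial worked example: for offset = 0x1234 the code above gives
   lo = 0x234 and hi = 0x1000; for offset = -4100 it gives lo = -4 and
   hi = -4096.  In both cases hi + lo == offset and lo stays within the
   +/-4095 range that a byte load can encode directly.)  */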
6228 if (hi + lo != offset)
6229 abort ();
6231 if (hi != 0)
6233 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6235 /* Get the base address; addsi3 knows how to handle constants
6236 that require more than one insn. */
6237 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6238 base = base_plus;
6239 offset = lo;
6243 /* Operands[2] may overlap operands[0] (though it won't overlap
6244 operands[1]); that's why we asked for a DImode reg -- so that we can
6245 use the half that does not overlap. */
6246 if (REGNO (operands[2]) == REGNO (operands[0]))
6247 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6248 else
6249 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6251 emit_insn (gen_zero_extendqisi2 (scratch,
6252 gen_rtx_MEM (QImode,
6253 plus_constant (base,
6254 offset))));
6255 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6256 gen_rtx_MEM (QImode,
6257 plus_constant (base,
6258 offset + 1))));
6259 if (!BYTES_BIG_ENDIAN)
6260 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6261 gen_rtx_IOR (SImode,
6262 gen_rtx_ASHIFT
6263 (SImode,
6264 gen_rtx_SUBREG (SImode, operands[0], 0),
6265 GEN_INT (8)),
6266 scratch)));
6267 else
6268 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6269 gen_rtx_IOR (SImode,
6270 gen_rtx_ASHIFT (SImode, scratch,
6271 GEN_INT (8)),
6272 gen_rtx_SUBREG (SImode, operands[0],
6273 0))));
6276 /* Handle storing a half-word to memory during reload by synthesizing it as two
6277 byte stores. Take care not to clobber the input values until after we
6278 have moved them somewhere safe. This code assumes that if the DImode
6279 scratch in operands[2] overlaps either the input value or output address
6280 in some way, then that value must die in this insn (we absolutely need
6281 two scratch registers for some corner cases). */
6282 void
6283 arm_reload_out_hi (rtx *operands)
6285 rtx ref = operands[0];
6286 rtx outval = operands[1];
6287 rtx base, scratch;
6288 HOST_WIDE_INT offset = 0;
6290 if (GET_CODE (ref) == SUBREG)
6292 offset = SUBREG_BYTE (ref);
6293 ref = SUBREG_REG (ref);
6296 if (GET_CODE (ref) == REG)
6298 /* We have a pseudo which has been spilt onto the stack; there
6299 are two cases here: the first where there is a simple
6300 stack-slot replacement and a second where the stack-slot is
6301 out of range, or is used as a subreg. */
6302 if (reg_equiv_mem[REGNO (ref)])
6304 ref = reg_equiv_mem[REGNO (ref)];
6305 base = find_replacement (&XEXP (ref, 0));
6307 else
6308 /* The slot is out of range, or was dressed up in a SUBREG. */
6309 base = reg_equiv_address[REGNO (ref)];
6311 else
6312 base = find_replacement (&XEXP (ref, 0));
6314 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6316 /* Handle the case where the address is too complex to be offset by 1. */
6317 if (GET_CODE (base) == MINUS
6318 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6320 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6322 /* Be careful not to destroy OUTVAL. */
6323 if (reg_overlap_mentioned_p (base_plus, outval))
6325 /* Updating base_plus might destroy outval, see if we can
6326 swap the scratch and base_plus. */
6327 if (!reg_overlap_mentioned_p (scratch, outval))
6329 rtx tmp = scratch;
6330 scratch = base_plus;
6331 base_plus = tmp;
6333 else
6335 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6337 /* Be conservative and copy OUTVAL into the scratch now,
6338 this should only be necessary if outval is a subreg
6339 of something larger than a word. */
6340 /* XXX Might this clobber base? I can't see how it can,
6341 since scratch is known to overlap with OUTVAL, and
6342 must be wider than a word. */
6343 emit_insn (gen_movhi (scratch_hi, outval));
6344 outval = scratch_hi;
6348 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6349 base = base_plus;
6351 else if (GET_CODE (base) == PLUS)
6353 /* The addend must be CONST_INT, or we would have dealt with it above. */
6354 HOST_WIDE_INT hi, lo;
6356 offset += INTVAL (XEXP (base, 1));
6357 base = XEXP (base, 0);
6359 /* Rework the address into a legal sequence of insns. */
6360 /* Valid range for lo is -4095 -> 4095 */
6361 lo = (offset >= 0
6362 ? (offset & 0xfff)
6363 : -((-offset) & 0xfff));
6365 /* Corner case: if lo is the max offset then we would be out of range
6366 once we have added the additional 1 below, so bump the msb into the
6367 pre-loading insn(s). */
6368 if (lo == 4095)
6369 lo &= 0x7ff;
6371 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6372 ^ (HOST_WIDE_INT) 0x80000000)
6373 - (HOST_WIDE_INT) 0x80000000);
6375 if (hi + lo != offset)
6376 abort ();
6378 if (hi != 0)
6380 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6382 /* Be careful not to destroy OUTVAL. */
6383 if (reg_overlap_mentioned_p (base_plus, outval))
6385 /* Updating base_plus might destroy outval, see if we
6386 can swap the scratch and base_plus. */
6387 if (!reg_overlap_mentioned_p (scratch, outval))
6389 rtx tmp = scratch;
6390 scratch = base_plus;
6391 base_plus = tmp;
6393 else
6395 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6397 /* Be conservative and copy outval into scratch now,
6398 this should only be necessary if outval is a
6399 subreg of something larger than a word. */
6400 /* XXX Might this clobber base? I can't see how it
6401 can, since scratch is known to overlap with
6402 outval. */
6403 emit_insn (gen_movhi (scratch_hi, outval));
6404 outval = scratch_hi;
6408 /* Get the base address; addsi3 knows how to handle constants
6409 that require more than one insn. */
6410 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6411 base = base_plus;
6412 offset = lo;
6416 if (BYTES_BIG_ENDIAN)
6418 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6419 plus_constant (base, offset + 1)),
6420 gen_lowpart (QImode, outval)));
6421 emit_insn (gen_lshrsi3 (scratch,
6422 gen_rtx_SUBREG (SImode, outval, 0),
6423 GEN_INT (8)));
6424 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6425 gen_lowpart (QImode, scratch)));
6427 else
6429 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6430 gen_lowpart (QImode, outval)));
6431 emit_insn (gen_lshrsi3 (scratch,
6432 gen_rtx_SUBREG (SImode, outval, 0),
6433 GEN_INT (8)));
6434 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6435 plus_constant (base, offset + 1)),
6436 gen_lowpart (QImode, scratch)));
6440 /* Print a symbolic form of X to the debug file, F. */
6441 static void
6442 arm_print_value (FILE *f, rtx x)
6444 switch (GET_CODE (x))
6446 case CONST_INT:
6447 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6448 return;
6450 case CONST_DOUBLE:
6451 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6452 return;
6454 case CONST_VECTOR:
6456 int i;
6458 fprintf (f, "<");
6459 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6461 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6462 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6463 fputc (',', f);
6465 fprintf (f, ">");
6467 return;
6469 case CONST_STRING:
6470 fprintf (f, "\"%s\"", XSTR (x, 0));
6471 return;
6473 case SYMBOL_REF:
6474 fprintf (f, "`%s'", XSTR (x, 0));
6475 return;
6477 case LABEL_REF:
6478 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6479 return;
6481 case CONST:
6482 arm_print_value (f, XEXP (x, 0));
6483 return;
6485 case PLUS:
6486 arm_print_value (f, XEXP (x, 0));
6487 fprintf (f, "+");
6488 arm_print_value (f, XEXP (x, 1));
6489 return;
6491 case PC:
6492 fprintf (f, "pc");
6493 return;
6495 default:
6496 fprintf (f, "????");
6497 return;
6501 /* Routines for manipulation of the constant pool. */
6503 /* Arm instructions cannot load a large constant directly into a
6504 register; they have to come from a pc relative load. The constant
6505 must therefore be placed in the addressable range of the pc
6506 relative load. Depending on the precise pc relative load
6507 instruction the range is somewhere between 256 bytes and 4k. This
6508 means that we often have to dump a constant inside a function, and
6509 generate code to branch around it.
6511 It is important to minimize this, since the branches will slow
6512 things down and make the code larger.
6514 Normally we can hide the table after an existing unconditional
6515 branch so that there is no interruption of the flow, but in the
6516 worst case the code looks like this:
6518 ldr rn, L1
6520 b L2
6521 align
6522 L1: .long value
6526 ldr rn, L3
6528 b L4
6529 align
6530 L3: .long value
6534 We fix this by performing a scan after scheduling, which notices
6535 which instructions need to have their operands fetched from the
6536 constant table and builds the table.
6538 The algorithm starts by building a table of all the constants that
6539 need fixing up and all the natural barriers in the function (places
6540 where a constant table can be dropped without breaking the flow).
6541 For each fixup we note how far the pc-relative replacement will be
6542 able to reach and the offset of the instruction into the function.
6544 Having built the table we then group the fixes together to form
6545 tables that are as large as possible (subject to addressing
6546 constraints) and emit each table of constants after the last
6547 barrier that is within range of all the instructions in the group.
6548 If a group does not contain a barrier, then we forcibly create one
6549 by inserting a jump instruction into the flow. Once the table has
6550 been inserted, the insns are then modified to reference the
6551 relevant entry in the pool.
6553 Possible enhancements to the algorithm (not implemented) are:
6555 1) For some processors and object formats, there may be benefit in
6556 aligning the pools to the start of cache lines; this alignment
6557 would need to be taken into account when calculating addressability
6558 of a pool. */
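/* Editorial overview of the code below: note_invalid_constants and
   push_minipool_fix record the constants that need pooling, and
   push_minipool_barrier records the natural barriers, while arm_reorg
   scans the function; arm_reorg then groups the fixes, inserting
   artificial barriers with create_fix_barrier where none are in range,
   and dump_minipool finally emits each table after its barrier.  */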
6560 /* These typedefs are located at the start of this file, so that
6561 they can be used in the prototypes there. This comment is to
6562 remind readers of that fact so that the following structures
6563 can be understood more easily.
6565 typedef struct minipool_node Mnode;
6566 typedef struct minipool_fixup Mfix; */
6568 struct minipool_node
6570 /* Doubly linked chain of entries. */
6571 Mnode * next;
6572 Mnode * prev;
6573 /* The maximum offset into the code at which this entry can be placed. While
6574 pushing fixes for forward references, all entries are sorted in order
6575 of increasing max_address. */
6576 HOST_WIDE_INT max_address;
6577 /* Similarly for an entry inserted for a backwards ref. */
6578 HOST_WIDE_INT min_address;
6579 /* The number of fixes referencing this entry. This can become zero
6580 if we "unpush" an entry. In this case we ignore the entry when we
6581 come to emit the code. */
6582 int refcount;
6583 /* The offset from the start of the minipool. */
6584 HOST_WIDE_INT offset;
6585 /* The value in the table. */
6586 rtx value;
6587 /* The mode of value. */
6588 enum machine_mode mode;
6589 /* The size of the value. With iWMMXt enabled,
6590 sizes > 4 also imply an alignment of 8 bytes. */
6591 int fix_size;
6594 struct minipool_fixup
6596 Mfix * next;
6597 rtx insn;
6598 HOST_WIDE_INT address;
6599 rtx * loc;
6600 enum machine_mode mode;
6601 int fix_size;
6602 rtx value;
6603 Mnode * minipool;
6604 HOST_WIDE_INT forwards;
6605 HOST_WIDE_INT backwards;
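/* Editorial note: these fields mirror what push_minipool_fix records --
   the INSN and its ADDRESS within the function, LOC (the operand
   location that must be rewritten), the operand MODE and VALUE, the
   padded FIX_SIZE, the FORWARDS/BACKWARDS pool ranges taken from the
   insn attributes, and the MINIPOOL node eventually assigned to the
   fix.  */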
6608 /* Fixes less than a word need padding out to a word boundary. */
6609 #define MINIPOOL_FIX_SIZE(mode) \
6610 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
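/* (Editorial example: GET_MODE_SIZE is 1 for QImode, 2 for HImode and
   4 for SImode, so all of these occupy 4 bytes in the pool, while a
   DImode or DFmode constant occupies its full 8 bytes.)  */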
6612 static Mnode * minipool_vector_head;
6613 static Mnode * minipool_vector_tail;
6614 static rtx minipool_vector_label;
6616 /* The linked list of all minipool fixes required for this function. */
6617 Mfix * minipool_fix_head;
6618 Mfix * minipool_fix_tail;
6619 /* The fix entry for the current minipool, once it has been placed. */
6620 Mfix * minipool_barrier;
6622 /* Determines if INSN is the start of a jump table. Returns the end
6623 of the TABLE or NULL_RTX. */
6624 static rtx
6625 is_jump_table (rtx insn)
6627 rtx table;
6629 if (GET_CODE (insn) == JUMP_INSN
6630 && JUMP_LABEL (insn) != NULL
6631 && ((table = next_real_insn (JUMP_LABEL (insn)))
6632 == next_real_insn (insn))
6633 && table != NULL
6634 && GET_CODE (table) == JUMP_INSN
6635 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6636 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6637 return table;
6639 return NULL_RTX;
6642 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6643 #define JUMP_TABLES_IN_TEXT_SECTION 0
6644 #endif
6646 static HOST_WIDE_INT
6647 get_jump_table_size (rtx insn)
6649 /* ADDR_VECs only take room if read-only data goes into the text
6650 section. */
6651 if (JUMP_TABLES_IN_TEXT_SECTION
6652 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6653 || 1
6654 #endif
6657 rtx body = PATTERN (insn);
6658 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6660 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6663 return 0;
6666 /* Move a minipool fix MP from its current location to before MAX_MP.
6667 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6668 constraints may need updating. */
6669 static Mnode *
6670 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6671 HOST_WIDE_INT max_address)
6673 /* This should never be true and the code below assumes these are
6674 different. */
6675 if (mp == max_mp)
6676 abort ();
6678 if (max_mp == NULL)
6680 if (max_address < mp->max_address)
6681 mp->max_address = max_address;
6683 else
6685 if (max_address > max_mp->max_address - mp->fix_size)
6686 mp->max_address = max_mp->max_address - mp->fix_size;
6687 else
6688 mp->max_address = max_address;
6690 /* Unlink MP from its current position. Since max_mp is non-null,
6691 mp->prev must be non-null. */
6692 mp->prev->next = mp->next;
6693 if (mp->next != NULL)
6694 mp->next->prev = mp->prev;
6695 else
6696 minipool_vector_tail = mp->prev;
6698 /* Re-insert it before MAX_MP. */
6699 mp->next = max_mp;
6700 mp->prev = max_mp->prev;
6701 max_mp->prev = mp;
6703 if (mp->prev != NULL)
6704 mp->prev->next = mp;
6705 else
6706 minipool_vector_head = mp;
6709 /* Save the new entry. */
6710 max_mp = mp;
6712 /* Scan over the preceding entries and adjust their addresses as
6713 required. */
6714 while (mp->prev != NULL
6715 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6717 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6718 mp = mp->prev;
6721 return max_mp;
6724 /* Add a constant to the minipool for a forward reference. Returns the
6725 node added or NULL if the constant will not fit in this pool. */
6726 static Mnode *
6727 add_minipool_forward_ref (Mfix *fix)
6729 /* If set, max_mp is the first pool_entry that has a lower
6730 constraint than the one we are trying to add. */
6731 Mnode * max_mp = NULL;
6732 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6733 Mnode * mp;
6735 /* If this fix's address is greater than the address of the first
6736 entry, then we can't put the fix in this pool. We subtract the
6737 size of the current fix to ensure that if the table is fully
6738 packed we still have enough room to insert this value by shuffling
6739 the other fixes forwards. */
6740 if (minipool_vector_head &&
6741 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6742 return NULL;
6744 /* Scan the pool to see if a constant with the same value has
6745 already been added. While we are doing this, also note the
6746 location where we must insert the constant if it doesn't already
6747 exist. */
6748 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6750 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6751 && fix->mode == mp->mode
6752 && (GET_CODE (fix->value) != CODE_LABEL
6753 || (CODE_LABEL_NUMBER (fix->value)
6754 == CODE_LABEL_NUMBER (mp->value)))
6755 && rtx_equal_p (fix->value, mp->value))
6757 /* More than one fix references this entry. */
6758 mp->refcount++;
6759 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6762 /* Note the insertion point if necessary. */
6763 if (max_mp == NULL
6764 && mp->max_address > max_address)
6765 max_mp = mp;
6767 /* If we are inserting an 8-byte aligned quantity and
6768 we have not already found an insertion point, then
6769 make sure that all such 8-byte aligned quantities are
6770 placed at the start of the pool. */
6771 if (ARM_DOUBLEWORD_ALIGN
6772 && max_mp == NULL
6773 && fix->fix_size == 8
6774 && mp->fix_size != 8)
6776 max_mp = mp;
6777 max_address = mp->max_address;
6781 /* The value is not currently in the minipool, so we need to create
6782 a new entry for it. If MAX_MP is NULL, the entry will be put on
6783 the end of the list since the placement is less constrained than
6784 any existing entry. Otherwise, we insert the new fix before
6785 MAX_MP and, if necessary, adjust the constraints on the other
6786 entries. */
6787 mp = xmalloc (sizeof (* mp));
6788 mp->fix_size = fix->fix_size;
6789 mp->mode = fix->mode;
6790 mp->value = fix->value;
6791 mp->refcount = 1;
6792 /* Not yet required for a backwards ref. */
6793 mp->min_address = -65536;
6795 if (max_mp == NULL)
6797 mp->max_address = max_address;
6798 mp->next = NULL;
6799 mp->prev = minipool_vector_tail;
6801 if (mp->prev == NULL)
6803 minipool_vector_head = mp;
6804 minipool_vector_label = gen_label_rtx ();
6806 else
6807 mp->prev->next = mp;
6809 minipool_vector_tail = mp;
6811 else
6813 if (max_address > max_mp->max_address - mp->fix_size)
6814 mp->max_address = max_mp->max_address - mp->fix_size;
6815 else
6816 mp->max_address = max_address;
6818 mp->next = max_mp;
6819 mp->prev = max_mp->prev;
6820 max_mp->prev = mp;
6821 if (mp->prev != NULL)
6822 mp->prev->next = mp;
6823 else
6824 minipool_vector_head = mp;
6827 /* Save the new entry. */
6828 max_mp = mp;
6830 /* Scan over the preceding entries and adjust their addresses as
6831 required. */
6832 while (mp->prev != NULL
6833 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6835 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6836 mp = mp->prev;
6839 return max_mp;
6842 static Mnode *
6843 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6844 HOST_WIDE_INT min_address)
6846 HOST_WIDE_INT offset;
6848 /* This should never be true, and the code below assumes these are
6849 different. */
6850 if (mp == min_mp)
6851 abort ();
6853 if (min_mp == NULL)
6855 if (min_address > mp->min_address)
6856 mp->min_address = min_address;
6858 else
6860 /* We will adjust this below if it is too loose. */
6861 mp->min_address = min_address;
6863 /* Unlink MP from its current position. Since min_mp is non-null,
6864 mp->next must be non-null. */
6865 mp->next->prev = mp->prev;
6866 if (mp->prev != NULL)
6867 mp->prev->next = mp->next;
6868 else
6869 minipool_vector_head = mp->next;
6871 /* Reinsert it after MIN_MP. */
6872 mp->prev = min_mp;
6873 mp->next = min_mp->next;
6874 min_mp->next = mp;
6875 if (mp->next != NULL)
6876 mp->next->prev = mp;
6877 else
6878 minipool_vector_tail = mp;
6881 min_mp = mp;
6883 offset = 0;
6884 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6886 mp->offset = offset;
6887 if (mp->refcount > 0)
6888 offset += mp->fix_size;
6890 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6891 mp->next->min_address = mp->min_address + mp->fix_size;
6894 return min_mp;
6897 /* Add a constant to the minipool for a backward reference. Returns the
6898 node added or NULL if the constant will not fit in this pool.
6900 Note that the code for insertion for a backwards reference can be
6901 somewhat confusing because the calculated offsets for each fix do
6902 not take into account the size of the pool (which is still under
6903 construction). */
6904 static Mnode *
6905 add_minipool_backward_ref (Mfix *fix)
6907 /* If set, min_mp is the last pool_entry that has a lower constraint
6908 than the one we are trying to add. */
6909 Mnode *min_mp = NULL;
6910 /* This can be negative, since it is only a constraint. */
6911 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6912 Mnode *mp;
6914 /* If we can't reach the current pool from this insn, or if we can't
6915 insert this entry at the end of the pool without pushing other
6916 fixes out of range, then we don't try. This ensures that we
6917 can't fail later on. */
6918 if (min_address >= minipool_barrier->address
6919 || (minipool_vector_tail->min_address + fix->fix_size
6920 >= minipool_barrier->address))
6921 return NULL;
6923 /* Scan the pool to see if a constant with the same value has
6924 already been added. While we are doing this, also note the
6925 location where we must insert the constant if it doesn't already
6926 exist. */
6927 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6929 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6930 && fix->mode == mp->mode
6931 && (GET_CODE (fix->value) != CODE_LABEL
6932 || (CODE_LABEL_NUMBER (fix->value)
6933 == CODE_LABEL_NUMBER (mp->value)))
6934 && rtx_equal_p (fix->value, mp->value)
6935 /* Check that there is enough slack to move this entry to the
6936 end of the table (this is conservative). */
6937 && (mp->max_address
6938 > (minipool_barrier->address
6939 + minipool_vector_tail->offset
6940 + minipool_vector_tail->fix_size)))
6942 mp->refcount++;
6943 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6946 if (min_mp != NULL)
6947 mp->min_address += fix->fix_size;
6948 else
6950 /* Note the insertion point if necessary. */
6951 if (mp->min_address < min_address)
6953 /* For now, we do not allow the insertion of nodes requiring 8-byte
6954 alignment anywhere but at the start of the pool. */
6955 if (ARM_DOUBLEWORD_ALIGN
6956 && fix->fix_size == 8 && mp->fix_size != 8)
6957 return NULL;
6958 else
6959 min_mp = mp;
6961 else if (mp->max_address
6962 < minipool_barrier->address + mp->offset + fix->fix_size)
6964 /* Inserting before this entry would push the fix beyond
6965 its maximum address (which can happen if we have
6966 re-located a forwards fix); force the new fix to come
6967 after it. */
6968 min_mp = mp;
6969 min_address = mp->min_address + fix->fix_size;
6971 /* If we are inserting an 8-byte aligned quantity and
6972 we have not already found an insertion point, then
6973 make sure that all such 8-byte aligned quantities are
6974 placed at the start of the pool. */
6975 else if (ARM_DOUBLEWORD_ALIGN
6976 && min_mp == NULL
6977 && fix->fix_size == 8
6978 && mp->fix_size < 8)
6980 min_mp = mp;
6981 min_address = mp->min_address + fix->fix_size;
6986 /* We need to create a new entry. */
6987 mp = xmalloc (sizeof (* mp));
6988 mp->fix_size = fix->fix_size;
6989 mp->mode = fix->mode;
6990 mp->value = fix->value;
6991 mp->refcount = 1;
6992 mp->max_address = minipool_barrier->address + 65536;
6994 mp->min_address = min_address;
6996 if (min_mp == NULL)
6998 mp->prev = NULL;
6999 mp->next = minipool_vector_head;
7001 if (mp->next == NULL)
7003 minipool_vector_tail = mp;
7004 minipool_vector_label = gen_label_rtx ();
7006 else
7007 mp->next->prev = mp;
7009 minipool_vector_head = mp;
7011 else
7013 mp->next = min_mp->next;
7014 mp->prev = min_mp;
7015 min_mp->next = mp;
7017 if (mp->next != NULL)
7018 mp->next->prev = mp;
7019 else
7020 minipool_vector_tail = mp;
7023 /* Save the new entry. */
7024 min_mp = mp;
7026 if (mp->prev)
7027 mp = mp->prev;
7028 else
7029 mp->offset = 0;
7031 /* Scan over the following entries and adjust their offsets. */
7032 while (mp->next != NULL)
7034 if (mp->next->min_address < mp->min_address + mp->fix_size)
7035 mp->next->min_address = mp->min_address + mp->fix_size;
7037 if (mp->refcount)
7038 mp->next->offset = mp->offset + mp->fix_size;
7039 else
7040 mp->next->offset = mp->offset;
7042 mp = mp->next;
7045 return min_mp;
7048 static void
7049 assign_minipool_offsets (Mfix *barrier)
7051 HOST_WIDE_INT offset = 0;
7052 Mnode *mp;
7054 minipool_barrier = barrier;
7056 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7058 mp->offset = offset;
7060 if (mp->refcount > 0)
7061 offset += mp->fix_size;
7065 /* Output the literal table */
7066 static void
7067 dump_minipool (rtx scan)
7069 Mnode * mp;
7070 Mnode * nmp;
7071 int align64 = 0;
7073 if (ARM_DOUBLEWORD_ALIGN)
7074 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7075 if (mp->refcount > 0 && mp->fix_size == 8)
7077 align64 = 1;
7078 break;
7081 if (dump_file)
7082 fprintf (dump_file,
7083 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7084 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7086 scan = emit_label_after (gen_label_rtx (), scan);
7087 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7088 scan = emit_label_after (minipool_vector_label, scan);
7090 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7092 if (mp->refcount > 0)
7094 if (dump_file)
7096 fprintf (dump_file,
7097 ";; Offset %u, min %ld, max %ld ",
7098 (unsigned) mp->offset, (unsigned long) mp->min_address,
7099 (unsigned long) mp->max_address);
7100 arm_print_value (dump_file, mp->value);
7101 fputc ('\n', dump_file);
7104 switch (mp->fix_size)
7106 #ifdef HAVE_consttable_1
7107 case 1:
7108 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7109 break;
7111 #endif
7112 #ifdef HAVE_consttable_2
7113 case 2:
7114 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7115 break;
7117 #endif
7118 #ifdef HAVE_consttable_4
7119 case 4:
7120 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7121 break;
7123 #endif
7124 #ifdef HAVE_consttable_8
7125 case 8:
7126 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7127 break;
7129 #endif
7130 default:
7131 abort ();
7132 break;
7136 nmp = mp->next;
7137 free (mp);
7140 minipool_vector_head = minipool_vector_tail = NULL;
7141 scan = emit_insn_after (gen_consttable_end (), scan);
7142 scan = emit_barrier_after (scan);
7145 /* Return the cost of forcibly inserting a barrier after INSN. */
7146 static int
7147 arm_barrier_cost (rtx insn)
7149 /* Basing the location of the pool on the loop depth is preferable,
7150 but at the moment, the basic block information seems to be
7151 corrupt by this stage of the compilation. */
7152 int base_cost = 50;
7153 rtx next = next_nonnote_insn (insn);
7155 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7156 base_cost -= 20;
7158 switch (GET_CODE (insn))
7160 case CODE_LABEL:
7161 /* It will always be better to place the table before the label, rather
7162 than after it. */
7163 return 50;
7165 case INSN:
7166 case CALL_INSN:
7167 return base_cost;
7169 case JUMP_INSN:
7170 return base_cost - 10;
7172 default:
7173 return base_cost + 10;
7177 /* Find the best place in the insn stream in the range
7178 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7179 Create the barrier by inserting a jump and add a new fix entry for
7180 it. */
7181 static Mfix *
7182 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7184 HOST_WIDE_INT count = 0;
7185 rtx barrier;
7186 rtx from = fix->insn;
7187 rtx selected = from;
7188 int selected_cost;
7189 HOST_WIDE_INT selected_address;
7190 Mfix * new_fix;
7191 HOST_WIDE_INT max_count = max_address - fix->address;
7192 rtx label = gen_label_rtx ();
7194 selected_cost = arm_barrier_cost (from);
7195 selected_address = fix->address;
7197 while (from && count < max_count)
7199 rtx tmp;
7200 int new_cost;
7202 /* This code shouldn't have been called if there was a natural barrier
7203 within range. */
7204 if (GET_CODE (from) == BARRIER)
7205 abort ();
7207 /* Count the length of this insn. */
7208 count += get_attr_length (from);
7210 /* If there is a jump table, add its length. */
7211 tmp = is_jump_table (from);
7212 if (tmp != NULL)
7214 count += get_jump_table_size (tmp);
7216 /* Jump tables aren't in a basic block, so base the cost on
7217 the dispatch insn. If we select this location, we will
7218 still put the pool after the table. */
7219 new_cost = arm_barrier_cost (from);
7221 if (count < max_count && new_cost <= selected_cost)
7223 selected = tmp;
7224 selected_cost = new_cost;
7225 selected_address = fix->address + count;
7228 /* Continue after the dispatch table. */
7229 from = NEXT_INSN (tmp);
7230 continue;
7233 new_cost = arm_barrier_cost (from);
7235 if (count < max_count && new_cost <= selected_cost)
7237 selected = from;
7238 selected_cost = new_cost;
7239 selected_address = fix->address + count;
7242 from = NEXT_INSN (from);
7245 /* Create a new JUMP_INSN that branches around a barrier. */
7246 from = emit_jump_insn_after (gen_jump (label), selected);
7247 JUMP_LABEL (from) = label;
7248 barrier = emit_barrier_after (from);
7249 emit_label_after (label, barrier);
7251 /* Create a minipool barrier entry for the new barrier. */
7252 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7253 new_fix->insn = barrier;
7254 new_fix->address = selected_address;
7255 new_fix->next = fix->next;
7256 fix->next = new_fix;
7258 return new_fix;
7261 /* Record that there is a natural barrier in the insn stream at
7262 ADDRESS. */
7263 static void
7264 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7266 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7268 fix->insn = insn;
7269 fix->address = address;
7271 fix->next = NULL;
7272 if (minipool_fix_head != NULL)
7273 minipool_fix_tail->next = fix;
7274 else
7275 minipool_fix_head = fix;
7277 minipool_fix_tail = fix;
7280 /* Record INSN, which will need fixing up to load a value from the
7281 minipool. ADDRESS is the offset of the insn from the start of the
7282 function; LOC is a pointer to the part of the insn which requires
7283 fixing; VALUE is the constant that must be loaded, which is of type
7284 MODE. */
7285 static void
7286 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7287 enum machine_mode mode, rtx value)
7289 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7291 #ifdef AOF_ASSEMBLER
7292 /* PIC symbol references need to be converted into offsets into the
7293 based area. */
7294 /* XXX This shouldn't be done here. */
7295 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7296 value = aof_pic_entry (value);
7297 #endif /* AOF_ASSEMBLER */
7299 fix->insn = insn;
7300 fix->address = address;
7301 fix->loc = loc;
7302 fix->mode = mode;
7303 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7304 fix->value = value;
7305 fix->forwards = get_attr_pool_range (insn);
7306 fix->backwards = get_attr_neg_pool_range (insn);
7307 fix->minipool = NULL;
7309 /* If an insn doesn't have a range defined for it, then it isn't
7310 expecting to be reworked by this code. Better to abort now than
7311 to generate duff assembly code. */
7312 if (fix->forwards == 0 && fix->backwards == 0)
7313 abort ();
7315 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7316 So there might be an empty word before the start of the pool.
7317 Hence we reduce the forward range by 4 to allow for this
7318 possibility. */
7319 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7320 fix->forwards -= 4;
7322 if (dump_file)
7324 fprintf (dump_file,
7325 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7326 GET_MODE_NAME (mode),
7327 INSN_UID (insn), (unsigned long) address,
7328 -1 * (long)fix->backwards, (long)fix->forwards);
7329 arm_print_value (dump_file, fix->value);
7330 fprintf (dump_file, "\n");
7333 /* Add it to the chain of fixes. */
7334 fix->next = NULL;
7336 if (minipool_fix_head != NULL)
7337 minipool_fix_tail->next = fix;
7338 else
7339 minipool_fix_head = fix;
7341 minipool_fix_tail = fix;
7344 /* Return the cost of synthesizing the const_double VAL inline.
7345 Returns the number of insns needed, or 99 if we don't know how to
7346 do it. */
7348 arm_const_double_inline_cost (rtx val)
7350 long parts[2];
7352 if (GET_MODE (val) == DFmode)
7354 REAL_VALUE_TYPE r;
7355 if (!TARGET_SOFT_FLOAT)
7356 return 99;
7357 REAL_VALUE_FROM_CONST_DOUBLE (r, val);
7358 REAL_VALUE_TO_TARGET_DOUBLE (r, parts);
7360 else if (GET_MODE (val) != VOIDmode)
7361 return 99;
7362 else
7364 parts[0] = CONST_DOUBLE_LOW (val);
7365 parts[1] = CONST_DOUBLE_HIGH (val);
7368 return (arm_gen_constant (SET, SImode, NULL_RTX, parts[0],
7369 NULL_RTX, NULL_RTX, 0, 0)
7370 + arm_gen_constant (SET, SImode, NULL_RTX, parts[1],
7371 NULL_RTX, NULL_RTX, 0, 0));
7374 /* Determine if a CONST_DOUBLE should be pushed to the minipool */
7375 static bool
7376 const_double_needs_minipool (rtx val)
7378 /* Thumb only knows how to load a CONST_DOUBLE from memory at the moment. */
7379 if (TARGET_THUMB)
7380 return true;
7382 /* Don't push anything to the minipool if a CONST_DOUBLE can be built with
7383 a few ALU insns directly. On balance, the optimum is likely to be around
7384 3 insns, except when there are no load delay slots where it should be 4.
7385 When optimizing for size, a limit of 3 allows saving at least one word
7386 except for cases where a single minipool entry could be shared more than
7387 2 times, which is rather unlikely to outweigh the overall savings. */
7388 return (arm_const_double_inline_cost (val)
7389 > ((optimize_size || arm_ld_sched) ? 3 : 4));
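/* (Editorial example, a sketch rather than part of the original code:
   on Thumb every CONST_DOUBLE goes to the pool; on ARM a DImode
   constant whose two 32-bit halves are both simple immediates, say 0
   and 1, should cost only two moves to build inline and so stays out
   of the pool, whereas a value needing several ALU insns per half is
   pushed to the minipool instead.)  */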
7392 /* Scan INSN and note any of its operands that need fixing.
7393 If DO_PUSHES is false we do not actually push any of the fixups
7394 needed. The function returns TRUE if any fixups were needed/pushed.
7395 This is used by arm_memory_load_p() which needs to know about loads
7396 of constants that will be converted into minipool loads. */
7397 static bool
7398 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7400 bool result = false;
7401 int opno;
7403 extract_insn (insn);
7405 if (!constrain_operands (1))
7406 fatal_insn_not_found (insn);
7408 if (recog_data.n_alternatives == 0)
7409 return false;
7411 /* Fill in recog_op_alt with information about the constraints of this insn. */
7412 preprocess_constraints ();
7414 for (opno = 0; opno < recog_data.n_operands; opno++)
7416 /* Things we need to fix can only occur in inputs. */
7417 if (recog_data.operand_type[opno] != OP_IN)
7418 continue;
7420 /* If this alternative is a memory reference, then any mention
7421 of constants in this alternative is really to fool reload
7422 into allowing us to accept one there. We need to fix them up
7423 now so that we output the right code. */
7424 if (recog_op_alt[opno][which_alternative].memory_ok)
7426 rtx op = recog_data.operand[opno];
7428 if (CONSTANT_P (op)
7429 && (GET_CODE (op) != CONST_DOUBLE
7430 || const_double_needs_minipool (op)))
7432 if (do_pushes)
7433 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7434 recog_data.operand_mode[opno], op);
7435 result = true;
7437 else if (GET_CODE (op) == MEM
7438 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7439 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7441 if (do_pushes)
7443 rtx cop = avoid_constant_pool_reference (op);
7445 /* Casting the address of something to a mode narrower
7446 than a word can cause avoid_constant_pool_reference()
7447 to return the pool reference itself. That's no good to
7448 us here. Let's just hope that we can use the
7449 constant pool value directly. */
7450 if (op == cop)
7451 cop = get_pool_constant (XEXP (op, 0));
7453 push_minipool_fix (insn, address,
7454 recog_data.operand_loc[opno],
7455 recog_data.operand_mode[opno], cop);
7458 result = true;
7463 return result;
7466 /* GCC puts the pool in the wrong place for ARM, since we can only
7467 load addresses a limited distance around the pc. We do some
7468 special munging to move the constant pool values to the correct
7469 point in the code. */
7470 static void
7471 arm_reorg (void)
7473 rtx insn;
7474 HOST_WIDE_INT address = 0;
7475 Mfix * fix;
7477 minipool_fix_head = minipool_fix_tail = NULL;
7479 /* The first insn must always be a note, or the code below won't
7480 scan it properly. */
7481 insn = get_insns ();
7482 if (GET_CODE (insn) != NOTE)
7483 abort ();
7485 /* Scan all the insns and record the operands that will need fixing. */
7486 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7488 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7489 && (arm_cirrus_insn_p (insn)
7490 || GET_CODE (insn) == JUMP_INSN
7491 || arm_memory_load_p (insn)))
7492 cirrus_reorg (insn);
7494 if (GET_CODE (insn) == BARRIER)
7495 push_minipool_barrier (insn, address);
7496 else if (INSN_P (insn))
7498 rtx table;
7500 note_invalid_constants (insn, address, true);
7501 address += get_attr_length (insn);
7503 /* If the insn is a vector jump, add the size of the table
7504 and skip the table. */
7505 if ((table = is_jump_table (insn)) != NULL)
7507 address += get_jump_table_size (table);
7508 insn = table;
7513 fix = minipool_fix_head;
7515 /* Now scan the fixups and perform the required changes. */
7516 while (fix)
7518 Mfix * ftmp;
7519 Mfix * fdel;
7520 Mfix * last_added_fix;
7521 Mfix * last_barrier = NULL;
7522 Mfix * this_fix;
7524 /* Skip any further barriers before the next fix. */
7525 while (fix && GET_CODE (fix->insn) == BARRIER)
7526 fix = fix->next;
7528 /* No more fixes. */
7529 if (fix == NULL)
7530 break;
7532 last_added_fix = NULL;
7534 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7536 if (GET_CODE (ftmp->insn) == BARRIER)
7538 if (ftmp->address >= minipool_vector_head->max_address)
7539 break;
7541 last_barrier = ftmp;
7543 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7544 break;
7546 last_added_fix = ftmp; /* Keep track of the last fix added. */
7549 /* If we found a barrier, drop back to that; any fixes that we
7550 could have reached but come after the barrier will now go in
7551 the next mini-pool. */
7552 if (last_barrier != NULL)
7554 /* Reduce the refcount for those fixes that won't go into this
7555 pool after all. */
7556 for (fdel = last_barrier->next;
7557 fdel && fdel != ftmp;
7558 fdel = fdel->next)
7560 fdel->minipool->refcount--;
7561 fdel->minipool = NULL;
7564 ftmp = last_barrier;
7566 else
7568 /* ftmp is the first fix that we can't fit into this pool and
7569 there are no natural barriers that we could use. Insert a
7570 new barrier in the code somewhere between the previous
7571 fix and this one, and arrange to jump around it. */
7572 HOST_WIDE_INT max_address;
7574 /* The last item on the list of fixes must be a barrier, so
7575 we can never run off the end of the list of fixes without
7576 last_barrier being set. */
7577 if (ftmp == NULL)
7578 abort ();
7580 max_address = minipool_vector_head->max_address;
7581 /* Check that there isn't another fix that is in range that
7582 we couldn't fit into this pool because the pool was
7583 already too large: we need to put the pool before such an
7584 instruction. */
7585 if (ftmp->address < max_address)
7586 max_address = ftmp->address;
7588 last_barrier = create_fix_barrier (last_added_fix, max_address);
7591 assign_minipool_offsets (last_barrier);
7593 while (ftmp)
7595 if (GET_CODE (ftmp->insn) != BARRIER
7596 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7597 == NULL))
7598 break;
7600 ftmp = ftmp->next;
7603 /* Scan over the fixes we have identified for this pool, fixing them
7604 up and adding the constants to the pool itself. */
7605 for (this_fix = fix; this_fix && ftmp != this_fix;
7606 this_fix = this_fix->next)
7607 if (GET_CODE (this_fix->insn) != BARRIER)
7609 rtx addr
7610 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7611 minipool_vector_label),
7612 this_fix->minipool->offset);
7613 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7616 dump_minipool (last_barrier->insn);
7617 fix = ftmp;
7620 /* From now on we must synthesize any constants that we can't handle
7621 directly. This can happen if the RTL gets split during final
7622 instruction generation. */
7623 after_arm_reorg = 1;
7625 /* Free the minipool memory. */
7626 obstack_free (&minipool_obstack, minipool_startobj);
7629 /* Routines to output assembly language. */
7631 /* If the rtx is one of the valid FPA immediate constants, return the
7632 string form of the number. In this way we can ensure that valid double
7633 constants are generated even when cross-compiling. */
7634 const char *
7635 fp_immediate_constant (rtx x)
7637 REAL_VALUE_TYPE r;
7638 int i;
7640 if (!fp_consts_inited)
7641 init_fp_table ();
7643 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7644 for (i = 0; i < 8; i++)
7645 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7646 return strings_fp[i];
7648 abort ();
7651 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7652 static const char *
7653 fp_const_from_val (REAL_VALUE_TYPE *r)
7655 int i;
7657 if (!fp_consts_inited)
7658 init_fp_table ();
7660 for (i = 0; i < 8; i++)
7661 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7662 return strings_fp[i];
7664 abort ();
7667 /* Output the operands of an LDM/STM instruction to STREAM.
7668 MASK is the ARM register set mask of which only bits 0-15 are important.
7669 REG is the base register, either the frame pointer or the stack pointer;
7670 INSTR is the possibly suffixed load or store instruction. */
7671 static void
7672 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7674 int i;
7675 int not_first = FALSE;
7677 fputc ('\t', stream);
7678 asm_fprintf (stream, instr, reg);
7679 fputs (", {", stream);
7681 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7682 if (mask & (1 << i))
7684 if (not_first)
7685 fprintf (stream, ", ");
7687 asm_fprintf (stream, "%r", i);
7688 not_first = TRUE;
7691 fprintf (stream, "}\n");
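/* (Editorial example; the exact INSTR template is supplied by the
   callers: a call such as print_multi_reg (f, "stmfd\t%r!", SP_REGNUM,
   mask) with bits 4, 5 and 14 set in MASK would print something like
       stmfd   sp!, {r4, r5, lr}
   since asm_fprintf's %r expands to the register's name.)  */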
7695 /* Output a FLDMX instruction to STREAM.
7696 BASE is the register containing the address.
7697 REG and COUNT specify the register range.
7698 Extra registers may be added to avoid hardware bugs. */
7700 static void
7701 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7703 int i;
7705 /* Work around the ARM10 VFPr1 bug. */
7706 if (count == 2 && !arm_arch6)
7708 if (reg == 15)
7709 reg--;
7710 count++;
7713 fputc ('\t', stream);
7714 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7716 for (i = reg; i < reg + count; i++)
7718 if (i > reg)
7719 fputs (", ", stream);
7720 asm_fprintf (stream, "d%d", i);
7722 fputs ("}\n", stream);
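/* (Editorial example: with BASE = the stack pointer, REG = 8 and
   COUNT = 3 the routine above prints something like
       fldmfdx sp!, {d8, d9, d10}
   and on a pre-ARMv6 core a two-register transfer is widened to three
   to dodge the VFPr1 erratum handled at the top of the function.)  */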
7727 /* Output the assembly for a store multiple. */
7729 const char *
7730 vfp_output_fstmx (rtx * operands)
7732 char pattern[100];
7733 int p;
7734 int base;
7735 int i;
7737 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7738 p = strlen (pattern);
7740 if (GET_CODE (operands[1]) != REG)
7741 abort ();
7743 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7744 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7746 p += sprintf (&pattern[p], ", d%d", base + i);
7748 strcpy (&pattern[p], "}");
7750 output_asm_insn (pattern, operands);
7751 return "";
7755 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7756 number of bytes pushed. */
7758 static int
7759 vfp_emit_fstmx (int base_reg, int count)
7761 rtx par;
7762 rtx dwarf;
7763 rtx tmp, reg;
7764 int i;
7766 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7767 register pairs are stored by a store multiple insn. We avoid this
7768 by pushing an extra pair. */
7769 if (count == 2 && !arm_arch6)
7771 if (base_reg == LAST_VFP_REGNUM - 3)
7772 base_reg -= 2;
7773 count++;
7776 /* ??? The frame layout is implementation defined. We describe
7777 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7778 We really need some way of representing the whole block so that the
7779 unwinder can figure it out at runtime. */
7780 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7781 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7783 reg = gen_rtx_REG (DFmode, base_reg);
7784 base_reg += 2;
7786 XVECEXP (par, 0, 0)
7787 = gen_rtx_SET (VOIDmode,
7788 gen_rtx_MEM (BLKmode,
7789 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7790 gen_rtx_UNSPEC (BLKmode,
7791 gen_rtvec (1, reg),
7792 UNSPEC_PUSH_MULT));
7794 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7795 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7796 GEN_INT (-(count * 8 + 4))));
7797 RTX_FRAME_RELATED_P (tmp) = 1;
7798 XVECEXP (dwarf, 0, 0) = tmp;
7800 tmp = gen_rtx_SET (VOIDmode,
7801 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7802 reg);
7803 RTX_FRAME_RELATED_P (tmp) = 1;
7804 XVECEXP (dwarf, 0, 1) = tmp;
7806 for (i = 1; i < count; i++)
7808 reg = gen_rtx_REG (DFmode, base_reg);
7809 base_reg += 2;
7810 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7812 tmp = gen_rtx_SET (VOIDmode,
7813 gen_rtx_MEM (DFmode,
7814 gen_rtx_PLUS (SImode,
7815 stack_pointer_rtx,
7816 GEN_INT (i * 8))),
7817 reg);
7818 RTX_FRAME_RELATED_P (tmp) = 1;
7819 XVECEXP (dwarf, 0, i + 1) = tmp;
7822 par = emit_insn (par);
7823 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7824 REG_NOTES (par));
7825 RTX_FRAME_RELATED_P (par) = 1;
7827 return count * 8 + 4;
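/* (Editorial note: the value returned above is the stack space consumed
   by the push -- 8 bytes per saved double-precision register plus the
   4-byte pad word implied by the FSTMX "standard format 1" layout
   described earlier.)  */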
7831 /* Output a 'call' insn. */
7832 const char *
7833 output_call (rtx *operands)
7835 if (arm_arch5)
7836 abort (); /* Patterns should call blx <reg> directly. */
7838 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7839 if (REGNO (operands[0]) == LR_REGNUM)
7841 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7842 output_asm_insn ("mov%?\t%0, %|lr", operands);
7845 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7847 if (TARGET_INTERWORK || arm_arch4t)
7848 output_asm_insn ("bx%?\t%0", operands);
7849 else
7850 output_asm_insn ("mov%?\t%|pc, %0", operands);
7852 return "";
7855 /* Output a 'call' insn that is a reference in memory. */
7856 const char *
7857 output_call_mem (rtx *operands)
7859 if (TARGET_INTERWORK && !arm_arch5)
7861 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7862 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7863 output_asm_insn ("bx%?\t%|ip", operands);
7865 else if (regno_use_in (LR_REGNUM, operands[0]))
7867 /* LR is used in the memory address. We load the address in the
7868 first instruction. It's safe to use IP as the target of the
7869 load since the call will kill it anyway. */
7870 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7871 if (arm_arch5)
7872 output_asm_insn ("blx%?\t%|ip", operands);
7873 else
7875 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7876 if (arm_arch4t)
7877 output_asm_insn ("bx%?\t%|ip", operands);
7878 else
7879 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7882 else
7884 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7885 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7888 return "";
7892 /* Output a move from arm registers to an fpa register.
7893 OPERANDS[0] is an fpa register.
7894 OPERANDS[1] is the first register of an arm register pair. */
7895 const char *
7896 output_mov_long_double_fpa_from_arm (rtx *operands)
7898 int arm_reg0 = REGNO (operands[1]);
7899 rtx ops[3];
7901 if (arm_reg0 == IP_REGNUM)
7902 abort ();
7904 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7905 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7906 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7908 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7909 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7911 return "";
7914 /* Output a move from an fpa register to arm registers.
7915 OPERANDS[0] is the first register of an arm register pair.
7916 OPERANDS[1] is an fpa register. */
7917 const char *
7918 output_mov_long_double_arm_from_fpa (rtx *operands)
7920 int arm_reg0 = REGNO (operands[0]);
7921 rtx ops[3];
7923 if (arm_reg0 == IP_REGNUM)
7924 abort ();
7926 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7927 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7928 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7930 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7931 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7932 return "";
7935 /* Output a move of a long double from arm registers to arm registers.
7936 OPERANDS[0] is the destination.
7937 OPERANDS[1] is the source. */
7938 const char *
7939 output_mov_long_double_arm_from_arm (rtx *operands)
7941 /* We have to be careful here because the two might overlap. */
7942 int dest_start = REGNO (operands[0]);
7943 int src_start = REGNO (operands[1]);
7944 rtx ops[2];
7945 int i;
7947 if (dest_start < src_start)
7949 for (i = 0; i < 3; i++)
7951 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7952 ops[1] = gen_rtx_REG (SImode, src_start + i);
7953 output_asm_insn ("mov%?\t%0, %1", ops);
7956 else
7958 for (i = 2; i >= 0; i--)
7960 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7961 ops[1] = gen_rtx_REG (SImode, src_start + i);
7962 output_asm_insn ("mov%?\t%0, %1", ops);
7966 return "";
7970 /* Output a move from arm registers to an fpa register.
7971 OPERANDS[0] is an fpa register.
7972 OPERANDS[1] is the first register of an arm register pair. */
7973 const char *
7974 output_mov_double_fpa_from_arm (rtx *operands)
7976 int arm_reg0 = REGNO (operands[1]);
7977 rtx ops[2];
7979 if (arm_reg0 == IP_REGNUM)
7980 abort ();
7982 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7983 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7984 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7985 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7986 return "";
7989 /* Output a move from an fpa register to arm registers.
7990 OPERANDS[0] is the first register of an arm register pair.
7991 OPERANDS[1] is an fpa register. */
7992 const char *
7993 output_mov_double_arm_from_fpa (rtx *operands)
7995 int arm_reg0 = REGNO (operands[0]);
7996 rtx ops[2];
7998 if (arm_reg0 == IP_REGNUM)
7999 abort ();
8001 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8002 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8003 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8004 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8005 return "";
8008 /* Output a move between double words.
8009 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8010 or MEM<-REG and all MEMs must be offsettable addresses. */
8011 const char *
8012 output_move_double (rtx *operands)
8014 enum rtx_code code0 = GET_CODE (operands[0]);
8015 enum rtx_code code1 = GET_CODE (operands[1]);
8016 rtx otherops[3];
8018 if (code0 == REG)
8020 int reg0 = REGNO (operands[0]);
8022 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8024 if (code1 == REG)
8026 int reg1 = REGNO (operands[1]);
8027 if (reg1 == IP_REGNUM)
8028 abort ();
8030 /* Ensure the second source is not overwritten. */
8031 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8032 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8033 else
8034 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8036 else if (code1 == CONST_VECTOR)
8038 HOST_WIDE_INT hint = 0;
8040 switch (GET_MODE (operands[1]))
8042 case V2SImode:
8043 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8044 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8045 break;
8047 case V4HImode:
8048 if (BYTES_BIG_ENDIAN)
8050 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8051 hint <<= 16;
8052 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8054 else
8056 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8057 hint <<= 16;
8058 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8061 otherops[1] = GEN_INT (hint);
8062 hint = 0;
8064 if (BYTES_BIG_ENDIAN)
8066 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8067 hint <<= 16;
8068 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8070 else
8072 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8073 hint <<= 16;
8074 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8077 operands[1] = GEN_INT (hint);
8078 break;
8080 case V8QImode:
8081 if (BYTES_BIG_ENDIAN)
8083 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8084 hint <<= 8;
8085 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8086 hint <<= 8;
8087 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8088 hint <<= 8;
8089 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8091 else
8093 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8094 hint <<= 8;
8095 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8096 hint <<= 8;
8097 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8098 hint <<= 8;
8099 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8102 otherops[1] = GEN_INT (hint);
8103 hint = 0;
8105 if (BYTES_BIG_ENDIAN)
8107 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8108 hint <<= 8;
8109 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8110 hint <<= 8;
8111 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8112 hint <<= 8;
8113 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8115 else
8117 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8118 hint <<= 8;
8119 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8120 hint <<= 8;
8121 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8122 hint <<= 8;
8123 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8126 operands[1] = GEN_INT (hint);
8127 break;
8129 default:
8130 abort ();
8132 output_mov_immediate (operands);
8133 output_mov_immediate (otherops);
8135 else if (code1 == CONST_DOUBLE)
8137 if (GET_MODE (operands[1]) == DFmode)
8139 REAL_VALUE_TYPE r;
8140 long l[2];
8142 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8143 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8144 otherops[1] = GEN_INT (l[1]);
8145 operands[1] = GEN_INT (l[0]);
8147 else if (GET_MODE (operands[1]) != VOIDmode)
8148 abort ();
8149 else if (WORDS_BIG_ENDIAN)
8151 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8152 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8154 else
8156 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8157 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8160 output_mov_immediate (operands);
8161 output_mov_immediate (otherops);
8163 else if (code1 == CONST_INT)
8165 #if HOST_BITS_PER_WIDE_INT > 32
8166 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8167 what the upper word is. */
8168 if (WORDS_BIG_ENDIAN)
8170 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8171 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8173 else
8175 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8176 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8178 #else
8179 /* Sign extend the intval into the high-order word. */
8180 if (WORDS_BIG_ENDIAN)
8182 otherops[1] = operands[1];
8183 operands[1] = (INTVAL (operands[1]) < 0
8184 ? constm1_rtx : const0_rtx);
8186 else
8187 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8188 #endif
8189 output_mov_immediate (otherops);
8190 output_mov_immediate (operands);
8192 else if (code1 == MEM)
8194 switch (GET_CODE (XEXP (operands[1], 0)))
8196 case REG:
8197 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8198 break;
8200 case PRE_INC:
8201 if (!TARGET_LDRD)
8202 abort (); /* Should never happen now. */
8203 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8204 break;
8206 case PRE_DEC:
8207 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8208 break;
8210 case POST_INC:
8211 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8212 break;
8214 case POST_DEC:
8215 if (!TARGET_LDRD)
8216 abort (); /* Should never happen now. */
8217 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8218 break;
8220 case PRE_MODIFY:
8221 case POST_MODIFY:
8222 otherops[0] = operands[0];
8223 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8224 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8226 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8228 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8230 /* Registers overlap so split out the increment. */
8231 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8232 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8234 else
8235 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8237 else
8239 /* We only allow constant increments, so this is safe. */
8240 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8242 break;
8244 case LABEL_REF:
8245 case CONST:
8246 output_asm_insn ("adr%?\t%0, %1", operands);
8247 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8248 break;
8250 default:
8251 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8252 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8254 otherops[0] = operands[0];
8255 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8256 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8258 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8260 if (GET_CODE (otherops[2]) == CONST_INT)
8262 switch ((int) INTVAL (otherops[2]))
8264 case -8:
8265 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8266 return "";
8267 case -4:
8268 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8269 return "";
8270 case 4:
8271 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8272 return "";
8275 if (TARGET_LDRD
8276 && (GET_CODE (otherops[2]) == REG
8277 || (GET_CODE (otherops[2]) == CONST_INT
8278 && INTVAL (otherops[2]) > -256
8279 && INTVAL (otherops[2]) < 256)))
8281 if (reg_overlap_mentioned_p (otherops[0],
8282 otherops[2]))
8284 /* Swap base and index registers over to
8285 avoid a conflict. */
8286 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8287 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8290 /* If both registers conflict, it will usually
8291 have been fixed by a splitter. */
8292 if (reg_overlap_mentioned_p (otherops[0],
8293 otherops[2]))
8295 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8296 output_asm_insn ("ldr%?d\t%0, [%1]",
8297 otherops);
8298 return "";
8300 else
8302 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8303 otherops);
8304 return "";
8307 if (GET_CODE (otherops[2]) == CONST_INT)
8309 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8310 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8311 else
8312 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8314 else
8315 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8317 else
8318 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8320 return "ldm%?ia\t%0, %M0";
8322 else
8324 otherops[1] = adjust_address (operands[1], SImode, 4);
8325 /* Take care of overlapping base/data reg. */
8326 if (reg_mentioned_p (operands[0], operands[1]))
8328 output_asm_insn ("ldr%?\t%0, %1", otherops);
8329 output_asm_insn ("ldr%?\t%0, %1", operands);
8331 else
8333 output_asm_insn ("ldr%?\t%0, %1", operands);
8334 output_asm_insn ("ldr%?\t%0, %1", otherops);
8339 else
8340 abort (); /* Constraints should prevent this. */
8342 else if (code0 == MEM && code1 == REG)
8344 if (REGNO (operands[1]) == IP_REGNUM)
8345 abort ();
8347 switch (GET_CODE (XEXP (operands[0], 0)))
8349 case REG:
8350 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8351 break;
8353 case PRE_INC:
8354 if (!TARGET_LDRD)
8355 abort (); /* Should never happen now. */
8356 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8357 break;
8359 case PRE_DEC:
8360 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8361 break;
8363 case POST_INC:
8364 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8365 break;
8367 case POST_DEC:
8368 if (!TARGET_LDRD)
8369 abort (); /* Should never happen now. */
8370 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8371 break;
8373 case PRE_MODIFY:
8374 case POST_MODIFY:
8375 otherops[0] = operands[1];
8376 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8377 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8379 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8380 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8381 else
8382 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8383 break;
8385 case PLUS:
8386 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8387 if (GET_CODE (otherops[2]) == CONST_INT)
8389 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8391 case -8:
8392 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8393 return "";
8395 case -4:
8396 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8397 return "";
8399 case 4:
8400 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8401 return "";
8404 if (TARGET_LDRD
8405 && (GET_CODE (otherops[2]) == REG
8406 || (GET_CODE (otherops[2]) == CONST_INT
8407 && INTVAL (otherops[2]) > -256
8408 && INTVAL (otherops[2]) < 256)))
8410 otherops[0] = operands[1];
8411 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8412 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8413 return "";
8415 /* Fall through */
8417 default:
8418 otherops[0] = adjust_address (operands[0], SImode, 4);
8419 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8420 output_asm_insn ("str%?\t%1, %0", operands);
8421 output_asm_insn ("str%?\t%1, %0", otherops);
8424 else
8425 /* Constraints should prevent this. */
8426 abort ();
8428 return "";
8432 /* Output an arbitrary MOV reg, #n.
8433 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8434 const char *
8435 output_mov_immediate (rtx *operands)
8437 HOST_WIDE_INT n = INTVAL (operands[1]);
8439 /* Try to use one MOV. */
8440 if (const_ok_for_arm (n))
8441 output_asm_insn ("mov%?\t%0, %1", operands);
8443 /* Try to use one MVN. */
8444 else if (const_ok_for_arm (~n))
8446 operands[1] = GEN_INT (~n);
8447 output_asm_insn ("mvn%?\t%0, %1", operands);
8449 else
8451 int n_ones = 0;
8452 int i;
8454 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8455 for (i = 0; i < 32; i++)
8456 if (n & 1 << i)
8457 n_ones++;
8459 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8460 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8461 else
8462 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8465 return "";
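/* Illustrative examples (not in the original source):
	n = 0x0000ff00  ->  mov  r0, #65280      (valid rotated 8-bit immediate)
	n = 0xffffff00  ->  mvn  r0, #255        (~n is a valid immediate)
	n = 0x00000fff  ->  mov  r0, #255
			    orr  r0, r0, #3840   (two ORR-able chunks)
   The destination register r0 is only an example.  */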
8468 /* Output an ADD r, s, #n where n may be too big for one instruction.
8469    If the addition is a no-op (adding zero to the same register), output nothing.  */
8470 const char *
8471 output_add_immediate (rtx *operands)
8473 HOST_WIDE_INT n = INTVAL (operands[2]);
8475 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8477 if (n < 0)
8478 output_multi_immediate (operands,
8479 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8480 -n);
8481 else
8482 output_multi_immediate (operands,
8483 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8487 return "";
8490 /* Output a multiple immediate operation.
8491 OPERANDS is the vector of operands referred to in the output patterns.
8492 INSTR1 is the output pattern to use for the first constant.
8493 INSTR2 is the output pattern to use for subsequent constants.
8494 IMMED_OP is the index of the constant slot in OPERANDS.
8495 N is the constant value. */
8496 static const char *
8497 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8498 int immed_op, HOST_WIDE_INT n)
8500 #if HOST_BITS_PER_WIDE_INT > 32
8501 n &= 0xffffffff;
8502 #endif
8504 if (n == 0)
8506 /* Quick and easy output. */
8507 operands[immed_op] = const0_rtx;
8508 output_asm_insn (instr1, operands);
8510 else
8512 int i;
8513 const char * instr = instr1;
8515 /* Note that n is never zero here (which would give no output). */
8516 for (i = 0; i < 32; i += 2)
8518 if (n & (3 << i))
8520 operands[immed_op] = GEN_INT (n & (255 << i));
8521 output_asm_insn (instr, operands);
8522 instr = instr2;
8523 i += 6;
8528 return "";
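/* Illustrative trace (not in the original source): called from
   output_add_immediate with n = 0x1004, the loop above finds an 8-bit
   chunk at i = 2 and another at i = 12, emitting
	add	r0, r1, #4
	add	r0, r0, #4096
   Stepping I by two keeps every chunk at an even bit position, as the
   ARM rotated-immediate encoding requires.  The registers are only an
   example.  */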
8531 /* Return the appropriate ARM instruction for the operation code.
8532 The returned result should not be overwritten. OP is the rtx of the
8533 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8534 was shifted. */
8535 const char *
8536 arithmetic_instr (rtx op, int shift_first_arg)
8538 switch (GET_CODE (op))
8540 case PLUS:
8541 return "add";
8543 case MINUS:
8544 return shift_first_arg ? "rsb" : "sub";
8546 case IOR:
8547 return "orr";
8549 case XOR:
8550 return "eor";
8552 case AND:
8553 return "and";
8555 default:
8556 abort ();
8560 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8561 for the operation code. The returned result should not be overwritten.
8562 OP is the rtx code of the shift.
8563    On exit, *AMOUNTP will be -1 if the shift is by a register, or the constant
8564    shift amount otherwise.  */
8565 static const char *
8566 shift_op (rtx op, HOST_WIDE_INT *amountp)
8568 const char * mnem;
8569 enum rtx_code code = GET_CODE (op);
8571 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8572 *amountp = -1;
8573 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8574 *amountp = INTVAL (XEXP (op, 1));
8575 else
8576 abort ();
8578 switch (code)
8580 case ASHIFT:
8581 mnem = "asl";
8582 break;
8584 case ASHIFTRT:
8585 mnem = "asr";
8586 break;
8588 case LSHIFTRT:
8589 mnem = "lsr";
8590 break;
8592 case ROTATE:
8593 if (*amountp == -1)
8594 abort ();
8595 *amountp = 32 - *amountp;
8597 /* Fall through. */
8599 case ROTATERT:
8600 mnem = "ror";
8601 break;
8603 case MULT:
8604 /* We never have to worry about the amount being other than a
8605 power of 2, since this case can never be reloaded from a reg. */
8606 if (*amountp != -1)
8607 *amountp = int_log2 (*amountp);
8608 else
8609 abort ();
8610 return "asl";
8612 default:
8613 abort ();
8616 if (*amountp != -1)
8618 /* This is not 100% correct, but follows from the desire to merge
8619 multiplication by a power of 2 with the recognizer for a
8620 shift. >=32 is not a valid shift for "asl", so we must try and
8621 output a shift that produces the correct arithmetical result.
8622 Using lsr #32 is identical except for the fact that the carry bit
8623 is not set correctly if we set the flags; but we never use the
8624 carry bit from such an operation, so we can ignore that. */
8625 if (code == ROTATERT)
8626 /* Rotate is just modulo 32. */
8627 *amountp &= 31;
8628 else if (*amountp != (*amountp & 31))
8630 if (code == ASHIFT)
8631 mnem = "lsr";
8632 *amountp = 32;
8635 /* Shifts of 0 are no-ops. */
8636 if (*amountp == 0)
8637 return NULL;
8640 return mnem;
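/* Illustrative examples (not in the original source): for
   (ashift (reg) (const_int 3)) this returns "asl" with *amountp == 3;
   for (mult (reg) (const_int 8)) the MULT case converts the power of two
   and also returns "asl" with *amountp == 3; for
   (ashiftrt (reg) (reg)) it returns "asr" with *amountp == -1, meaning
   the shift amount comes from a register.  */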
8643 /* Obtain the shift count from the POWER of two.  */
8645 static HOST_WIDE_INT
8646 int_log2 (HOST_WIDE_INT power)
8648 HOST_WIDE_INT shift = 0;
8650 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8652 if (shift > 31)
8653 abort ();
8654 shift++;
8657 return shift;
8660 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8661 /bin/as is horribly restrictive. */
8662 #define MAX_ASCII_LEN 51
8664 void
8665 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8667 int i;
8668 int len_so_far = 0;
8670 fputs ("\t.ascii\t\"", stream);
8672 for (i = 0; i < len; i++)
8674 int c = p[i];
8676 if (len_so_far >= MAX_ASCII_LEN)
8678 fputs ("\"\n\t.ascii\t\"", stream);
8679 len_so_far = 0;
8682 switch (c)
8684 case TARGET_TAB:
8685 fputs ("\\t", stream);
8686 len_so_far += 2;
8687 break;
8689 case TARGET_FF:
8690 fputs ("\\f", stream);
8691 len_so_far += 2;
8692 break;
8694 case TARGET_BS:
8695 fputs ("\\b", stream);
8696 len_so_far += 2;
8697 break;
8699 case TARGET_CR:
8700 fputs ("\\r", stream);
8701 len_so_far += 2;
8702 break;
8704 case TARGET_NEWLINE:
8705 fputs ("\\n", stream);
8706 c = p [i + 1];
8707 if ((c >= ' ' && c <= '~')
8708 || c == TARGET_TAB)
8709 /* This is a good place for a line break. */
8710 len_so_far = MAX_ASCII_LEN;
8711 else
8712 len_so_far += 2;
8713 break;
8715 case '\"':
8716 case '\\':
8717 putc ('\\', stream);
8718 len_so_far++;
8719 /* Drop through. */
8721 default:
8722 if (c >= ' ' && c <= '~')
8724 putc (c, stream);
8725 len_so_far++;
8727 else
8729 fprintf (stream, "\\%03o", c);
8730 len_so_far += 4;
8732 break;
8736 fputs ("\"\n", stream);
8739 /* Compute the register save mask for registers 0 through 12
8740 inclusive. This code is used by arm_compute_save_reg_mask. */
8741 static unsigned long
8742 arm_compute_save_reg0_reg12_mask (void)
8744 unsigned long func_type = arm_current_func_type ();
8745 unsigned int save_reg_mask = 0;
8746 unsigned int reg;
8748 if (IS_INTERRUPT (func_type))
8750 unsigned int max_reg;
8751 /* Interrupt functions must not corrupt any registers,
8752 even call clobbered ones. If this is a leaf function
8753 we can just examine the registers used by the RTL, but
8754 otherwise we have to assume that whatever function is
8755 called might clobber anything, and so we have to save
8756 all the call-clobbered registers as well. */
8757 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8758 /* FIQ handlers have registers r8 - r12 banked, so
8759 we only need to check r0 - r7.  Normal ISRs only
8760 bank r13 and r14, so we must check up to r12.
8761 r13 is the stack pointer which is always preserved,
8762 so we do not need to consider it here. */
8763 max_reg = 7;
8764 else
8765 max_reg = 12;
8767 for (reg = 0; reg <= max_reg; reg++)
8768 if (regs_ever_live[reg]
8769 || (! current_function_is_leaf && call_used_regs [reg]))
8770 save_reg_mask |= (1 << reg);
8772 /* Also save the pic base register if necessary. */
8773 if (flag_pic
8774 && !TARGET_SINGLE_PIC_BASE
8775 && current_function_uses_pic_offset_table)
8776 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8778 else
8780 /* In the normal case we only need to save those registers
8781 which are call saved and which are used by this function. */
8782 for (reg = 0; reg <= 10; reg++)
8783 if (regs_ever_live[reg] && ! call_used_regs [reg])
8784 save_reg_mask |= (1 << reg);
8786 /* Handle the frame pointer as a special case. */
8787 if (! TARGET_APCS_FRAME
8788 && ! frame_pointer_needed
8789 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8790 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8791 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8793 /* If we aren't loading the PIC register,
8794 don't stack it even though it may be live. */
8795 if (flag_pic
8796 && !TARGET_SINGLE_PIC_BASE
8797 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
8798 || current_function_uses_pic_offset_table))
8799 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8802 /* Save registers so the exception handler can modify them. */
8803 if (current_function_calls_eh_return)
8805 unsigned int i;
8807 for (i = 0; ; i++)
8809 reg = EH_RETURN_DATA_REGNO (i);
8810 if (reg == INVALID_REGNUM)
8811 break;
8812 save_reg_mask |= 1 << reg;
8816 return save_reg_mask;
8819 /* Compute a bit mask of which registers need to be
8820 saved on the stack for the current function. */
8822 static unsigned long
8823 arm_compute_save_reg_mask (void)
8825 unsigned int save_reg_mask = 0;
8826 unsigned long func_type = arm_current_func_type ();
8828 if (IS_NAKED (func_type))
8829 /* This should never really happen. */
8830 return 0;
8832 /* If we are creating a stack frame, then we must save the frame pointer,
8833 IP (which will hold the old stack pointer), LR and the PC. */
8834 if (frame_pointer_needed)
8835 save_reg_mask |=
8836 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8837 | (1 << IP_REGNUM)
8838 | (1 << LR_REGNUM)
8839 | (1 << PC_REGNUM);
8841 /* Volatile functions do not return, so there
8842 is no need to save any other registers. */
8843 if (IS_VOLATILE (func_type))
8844 return save_reg_mask;
8846 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8848 /* Decide if we need to save the link register.
8849 Interrupt routines have their own banked link register,
8850 so they never need to save it.
8851 Otherwise if we do not use the link register we do not need to save
8852 it. If we are pushing other registers onto the stack however, we
8853 can save an instruction in the epilogue by pushing the link register
8854 now and then popping it back into the PC. This incurs extra memory
8855 accesses though, so we only do it when optimizing for size, and only
8856 if we know that we will not need a fancy return sequence. */
8857 if (regs_ever_live [LR_REGNUM]
8858 || (save_reg_mask
8859 && optimize_size
8860 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8861 && !current_function_calls_eh_return))
8862 save_reg_mask |= 1 << LR_REGNUM;
8864 if (cfun->machine->lr_save_eliminated)
8865 save_reg_mask &= ~ (1 << LR_REGNUM);
8867 if (TARGET_REALLY_IWMMXT
8868 && ((bit_count (save_reg_mask)
8869 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8871 unsigned int reg;
8873 /* The total number of registers that are going to be pushed
8874 onto the stack is odd. We need to ensure that the stack
8875 is 64-bit aligned before we start to save iWMMXt registers,
8876 and also before we start to create locals. (A local variable
8877 might be a double or long long which we will load/store using
8878 an iWMMXt instruction). Therefore we need to push another
8879 ARM register, so that the stack will be 64-bit aligned. We
8880 try to avoid using the arg registers (r0 - r3) as they might be
8881 used to pass values in a tail call. */
8882 for (reg = 4; reg <= 12; reg++)
8883 if ((save_reg_mask & (1 << reg)) == 0)
8884 break;
8886 if (reg <= 12)
8887 save_reg_mask |= (1 << reg);
8888 else
8890 cfun->machine->sibcall_blocked = 1;
8891 save_reg_mask |= (1 << 3);
8895 return save_reg_mask;
8899 /* Compute a bit mask of which registers need to be
8900 saved on the stack for the current function. */
8901 static unsigned long
8902 thumb_compute_save_reg_mask (void)
8904 unsigned long mask;
8905 int reg;
8907 mask = 0;
8908 for (reg = 0; reg < 12; reg ++)
8910 if (regs_ever_live[reg] && !call_used_regs[reg])
8911 mask |= 1 << reg;
8914 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8915 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8916 if (TARGET_SINGLE_PIC_BASE)
8917 mask &= ~(1 << arm_pic_register);
8918 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8919 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
8920 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
8922 /* lr will also be pushed if any lo regs are pushed. */
8923 if (mask & 0xff || thumb_force_lr_save ())
8924 mask |= (1 << LR_REGNUM);
8926 /* Make sure we have a low work register if we need one. */
8927 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
8928 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8929 mask |= 1 << LAST_LO_REGNUM;
8931 return mask;
8935 /* Return the number of bytes required to save VFP registers. */
8936 static int
8937 arm_get_vfp_saved_size (void)
8939 unsigned int regno;
8940 int count;
8941 int saved;
8943 saved = 0;
8944 /* Space for saved VFP registers. */
8945 if (TARGET_HARD_FLOAT && TARGET_VFP)
8947 count = 0;
8948 for (regno = FIRST_VFP_REGNUM;
8949 regno < LAST_VFP_REGNUM;
8950 regno += 2)
8952 if ((!regs_ever_live[regno] || call_used_regs[regno])
8953 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8955 if (count > 0)
8957 /* Work around the ARM10 VFPr1 bug.  */
8958 if (count == 2 && !arm_arch6)
8959 count++;
8960 saved += count * 8 + 4;
8962 count = 0;
8964 else
8965 count++;
8967 if (count > 0)
8969 if (count == 2 && !arm_arch6)
8970 count++;
8971 saved += count * 8 + 4;
8974 return saved;
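/* Illustrative example (not in the original source): a run of three
   live call-saved VFP double registers costs 3 * 8 + 4 = 28 bytes, the
   extra word being the FLDMX/FSTMX format word.  On a pre-ARMv6 core a
   run of exactly two registers is padded to three (28 bytes instead of
   20) to step around the ARM10 VFPr1 erratum handled above.  */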
8978 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8979 everything bar the final return instruction. */
8980 const char *
8981 output_return_instruction (rtx operand, int really_return, int reverse)
8983 char conditional[10];
8984 char instr[100];
8985 int reg;
8986 unsigned long live_regs_mask;
8987 unsigned long func_type;
8988 arm_stack_offsets *offsets;
8990 func_type = arm_current_func_type ();
8992 if (IS_NAKED (func_type))
8993 return "";
8995 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8997 /* If this function was declared non-returning, and we have
8998 found a tail call, then we have to trust that the called
8999 function won't return. */
9000 if (really_return)
9002 rtx ops[2];
9004 /* Otherwise, trap an attempted return by aborting. */
9005 ops[0] = operand;
9006 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9007 : "abort");
9008 assemble_external_libcall (ops[1]);
9009 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9012 return "";
9015 if (current_function_calls_alloca && !really_return)
9016 abort ();
9018 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9020 return_used_this_function = 1;
9022 live_regs_mask = arm_compute_save_reg_mask ();
9024 if (live_regs_mask)
9026 const char * return_reg;
9028 /* If we do not have any special requirements for function exit
9029 (e.g. interworking, or ISR) then we can load the return address
9030 directly into the PC. Otherwise we must load it into LR. */
9031 if (really_return
9032 && ! TARGET_INTERWORK)
9033 return_reg = reg_names[PC_REGNUM];
9034 else
9035 return_reg = reg_names[LR_REGNUM];
9037 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9039 /* There are three possible reasons for the IP register
9040 being saved. 1) a stack frame was created, in which case
9041 IP contains the old stack pointer, or 2) an ISR routine
9042 corrupted it, or 3) it was saved to align the stack on
9043 iWMMXt. In case 1, restore IP into SP, otherwise just
9044 restore IP. */
9045 if (frame_pointer_needed)
9047 live_regs_mask &= ~ (1 << IP_REGNUM);
9048 live_regs_mask |= (1 << SP_REGNUM);
9050 else
9052 if (! IS_INTERRUPT (func_type)
9053 && ! TARGET_REALLY_IWMMXT)
9054 abort ();
9058 /* On some ARM architectures it is faster to use LDR rather than
9059 LDM to load a single register. On other architectures, the
9060 cost is the same. In 26 bit mode, or for exception handlers,
9061 we have to use LDM to load the PC so that the CPSR is also
9062 restored. */
9063 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9065 if (live_regs_mask == (unsigned int)(1 << reg))
9066 break;
9068 if (reg <= LAST_ARM_REGNUM
9069 && (reg != LR_REGNUM
9070 || ! really_return
9071 || ! IS_INTERRUPT (func_type)))
9073 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9074 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9076 else
9078 char *p;
9079 int first = 1;
9081 /* Generate the load multiple instruction to restore the
9082 registers. Note we can get here, even if
9083 frame_pointer_needed is true, but only if sp already
9084 points to the base of the saved core registers. */
9085 if (live_regs_mask & (1 << SP_REGNUM))
9087 unsigned HOST_WIDE_INT stack_adjust;
9089 offsets = arm_get_frame_offsets ();
9090 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9091 if (stack_adjust != 0 && stack_adjust != 4)
9092 abort ();
9094 if (stack_adjust && arm_arch5)
9095 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9096 else
9098 /* If we can't use ldmib (SA110 bug), then try to pop r3
9099 instead. */
9100 if (stack_adjust)
9101 live_regs_mask |= 1 << 3;
9102 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9105 else
9106 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9108 p = instr + strlen (instr);
9110 for (reg = 0; reg <= SP_REGNUM; reg++)
9111 if (live_regs_mask & (1 << reg))
9113 int l = strlen (reg_names[reg]);
9115 if (first)
9116 first = 0;
9117 else
9119 memcpy (p, ", ", 2);
9120 p += 2;
9123 memcpy (p, "%|", 2);
9124 memcpy (p + 2, reg_names[reg], l);
9125 p += l + 2;
9128 if (live_regs_mask & (1 << LR_REGNUM))
9130 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9131 /* If returning from an interrupt, restore the CPSR. */
9132 if (IS_INTERRUPT (func_type))
9133 strcat (p, "^");
9135 else
9136 strcpy (p, "}");
9139 output_asm_insn (instr, & operand);
9141 /* See if we need to generate an extra instruction to
9142 perform the actual function return. */
9143 if (really_return
9144 && func_type != ARM_FT_INTERWORKED
9145 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9147 /* The return has already been handled
9148 by loading the LR into the PC. */
9149 really_return = 0;
9153 if (really_return)
9155 switch ((int) ARM_FUNC_TYPE (func_type))
9157 case ARM_FT_ISR:
9158 case ARM_FT_FIQ:
9159 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9160 break;
9162 case ARM_FT_INTERWORKED:
9163 sprintf (instr, "bx%s\t%%|lr", conditional);
9164 break;
9166 case ARM_FT_EXCEPTION:
9167 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9168 break;
9170 default:
9171 /* Use bx if it's available. */
9172 if (arm_arch5 || arm_arch4t)
9173 sprintf (instr, "bx%s\t%%|lr", conditional);
9174 else
9175 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9176 break;
9179 output_asm_insn (instr, & operand);
9182 return "";
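/* Illustrative example (not in the original source): a normal ARM
   function that saved {r4, r5, lr} and needs no interworking pops the
   saved LR straight into the PC:
	ldmfd	sp!, {r4, r5, pc}
   while an equivalent function that saved only LR uses the cheaper
	ldr	pc, [sp], #4  */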
9185 /* Write the function name into the code section, directly preceding
9186 the function prologue.
9188 Code will be output similar to this:
9190 .ascii "arm_poke_function_name", 0
9191 .align
9193 .word 0xff000000 + (t1 - t0)
9194 arm_poke_function_name
9195 mov ip, sp
9196 stmfd sp!, {fp, ip, lr, pc}
9197 sub fp, ip, #4
9199 When performing a stack backtrace, code can inspect the value
9200 of 'pc' stored at 'fp' + 0. If the trace function then looks
9201 at location pc - 12 and the top 8 bits are set, then we know
9202 that there is a function name embedded immediately preceding this
9203 location, and that its length is ((pc[-3]) & ~0xff000000).
9205 We assume that pc is declared as a pointer to an unsigned long.
9207 It is of no benefit to output the function name if we are assembling
9208 a leaf function. These function types will not contain a stack
9209 backtrace structure; therefore it is not possible to determine the
9210 function name. */
9211 void
9212 arm_poke_function_name (FILE *stream, const char *name)
9214 unsigned long alignlength;
9215 unsigned long length;
9216 rtx x;
9218 length = strlen (name) + 1;
9219 alignlength = ROUND_UP_WORD (length);
9221 ASM_OUTPUT_ASCII (stream, name, length);
9222 ASM_OUTPUT_ALIGN (stream, 2);
9223 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9224 assemble_aligned_integer (UNITS_PER_WORD, x);
9227 /* Place some comments into the assembler stream
9228 describing the current function. */
9229 static void
9230 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9232 unsigned long func_type;
9234 if (!TARGET_ARM)
9236 thumb_output_function_prologue (f, frame_size);
9237 return;
9240 /* Sanity check. */
9241 if (arm_ccfsm_state || arm_target_insn)
9242 abort ();
9244 func_type = arm_current_func_type ();
9246 switch ((int) ARM_FUNC_TYPE (func_type))
9248 default:
9249 case ARM_FT_NORMAL:
9250 break;
9251 case ARM_FT_INTERWORKED:
9252 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9253 break;
9254 case ARM_FT_ISR:
9255 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9256 break;
9257 case ARM_FT_FIQ:
9258 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9259 break;
9260 case ARM_FT_EXCEPTION:
9261 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9262 break;
9265 if (IS_NAKED (func_type))
9266 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9268 if (IS_VOLATILE (func_type))
9269 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9271 if (IS_NESTED (func_type))
9272 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9274 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9275 current_function_args_size,
9276 current_function_pretend_args_size, frame_size);
9278 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9279 frame_pointer_needed,
9280 cfun->machine->uses_anonymous_args);
9282 if (cfun->machine->lr_save_eliminated)
9283 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9285 if (current_function_calls_eh_return)
9286 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9288 #ifdef AOF_ASSEMBLER
9289 if (flag_pic)
9290 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9291 #endif
9293 return_used_this_function = 0;
9296 const char *
9297 arm_output_epilogue (rtx sibling)
9299 int reg;
9300 unsigned long saved_regs_mask;
9301 unsigned long func_type;
9302 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9303 frame that is $fp + 4 for a non-variadic function. */
9304 int floats_offset = 0;
9305 rtx operands[3];
9306 FILE * f = asm_out_file;
9307 unsigned int lrm_count = 0;
9308 int really_return = (sibling == NULL);
9309 int start_reg;
9310 arm_stack_offsets *offsets;
9312 /* If we have already generated the return instruction
9313 then it is futile to generate anything else. */
9314 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9315 return "";
9317 func_type = arm_current_func_type ();
9319 if (IS_NAKED (func_type))
9320 /* Naked functions don't have epilogues. */
9321 return "";
9323 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9325 rtx op;
9327 /* A volatile function should never return. Call abort. */
9328 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9329 assemble_external_libcall (op);
9330 output_asm_insn ("bl\t%a0", &op);
9332 return "";
9335 if (current_function_calls_eh_return
9336 && ! really_return)
9337 /* If we are throwing an exception, then we really must
9338 be doing a return, so we can't tail-call. */
9339 abort ();
9341 offsets = arm_get_frame_offsets ();
9342 saved_regs_mask = arm_compute_save_reg_mask ();
9344 if (TARGET_IWMMXT)
9345 lrm_count = bit_count (saved_regs_mask);
9347 floats_offset = offsets->saved_args;
9348 /* Compute how far away the floats will be. */
9349 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9350 if (saved_regs_mask & (1 << reg))
9351 floats_offset += 4;
9353 if (frame_pointer_needed)
9355 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9356 int vfp_offset = offsets->frame;
9358 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9360 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9361 if (regs_ever_live[reg] && !call_used_regs[reg])
9363 floats_offset += 12;
9364 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9365 reg, FP_REGNUM, floats_offset - vfp_offset);
9368 else
9370 start_reg = LAST_FPA_REGNUM;
9372 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9374 if (regs_ever_live[reg] && !call_used_regs[reg])
9376 floats_offset += 12;
9378 /* We can't unstack more than four registers at once. */
9379 if (start_reg - reg == 3)
9381 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9382 reg, FP_REGNUM, floats_offset - vfp_offset);
9383 start_reg = reg - 1;
9386 else
9388 if (reg != start_reg)
9389 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9390 reg + 1, start_reg - reg,
9391 FP_REGNUM, floats_offset - vfp_offset);
9392 start_reg = reg - 1;
9396 /* Just in case the last register checked also needs unstacking. */
9397 if (reg != start_reg)
9398 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9399 reg + 1, start_reg - reg,
9400 FP_REGNUM, floats_offset - vfp_offset);
9403 if (TARGET_HARD_FLOAT && TARGET_VFP)
9405 int saved_size;
9407 /* The fldmx insn does not have base+offset addressing modes,
9408 so we use IP to hold the address. */
9409 saved_size = arm_get_vfp_saved_size ();
9411 if (saved_size > 0)
9413 floats_offset += saved_size;
9414 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9415 FP_REGNUM, floats_offset - vfp_offset);
9417 start_reg = FIRST_VFP_REGNUM;
9418 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9420 if ((!regs_ever_live[reg] || call_used_regs[reg])
9421 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9423 if (start_reg != reg)
9424 arm_output_fldmx (f, IP_REGNUM,
9425 (start_reg - FIRST_VFP_REGNUM) / 2,
9426 (reg - start_reg) / 2);
9427 start_reg = reg + 2;
9430 if (start_reg != reg)
9431 arm_output_fldmx (f, IP_REGNUM,
9432 (start_reg - FIRST_VFP_REGNUM) / 2,
9433 (reg - start_reg) / 2);
9436 if (TARGET_IWMMXT)
9438 /* The frame pointer is guaranteed to be non-double-word aligned.
9439 This is because it is set to (old_stack_pointer - 4) and the
9440 old_stack_pointer was double word aligned. Thus the offset to
9441 the iWMMXt registers to be loaded must also be non-double-word
9442 aligned, so that the resultant address *is* double-word aligned.
9443 We can ignore floats_offset since that was already included in
9444 the live_regs_mask. */
9445 lrm_count += (lrm_count % 2 ? 2 : 1);
9447 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9448 if (regs_ever_live[reg] && !call_used_regs[reg])
9450 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9451 reg, FP_REGNUM, lrm_count * 4);
9452 lrm_count += 2;
9456 /* saved_regs_mask should contain the IP, which at the time of stack
9457 frame generation actually contains the old stack pointer. So a
9458 quick way to unwind the stack is just pop the IP register directly
9459 into the stack pointer. */
9460 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9461 abort ();
9462 saved_regs_mask &= ~ (1 << IP_REGNUM);
9463 saved_regs_mask |= (1 << SP_REGNUM);
9465 /* There are two registers left in saved_regs_mask - LR and PC. We
9466 only need to restore the LR register (the return address), but to
9467 save time we can load it directly into the PC, unless we need a
9468 special function exit sequence, or we are not really returning. */
9469 if (really_return
9470 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9471 && !current_function_calls_eh_return)
9472 /* Delete the LR from the register mask, so that the LR on
9473 the stack is loaded into the PC in the register mask. */
9474 saved_regs_mask &= ~ (1 << LR_REGNUM);
9475 else
9476 saved_regs_mask &= ~ (1 << PC_REGNUM);
9478 /* We must use SP as the base register, because SP is one of the
9479 registers being restored. If an interrupt or page fault
9480 happens in the ldm instruction, the SP might or might not
9481 have been restored. That would be bad, as then SP will no
9482 longer indicate the safe area of stack, and we can get stack
9483 corruption. Using SP as the base register means that it will
9484 be reset correctly to the original value, should an interrupt
9485 occur. If the stack pointer already points at the right
9486 place, then omit the subtraction. */
9487 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9488 || current_function_calls_alloca)
9489 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9490 4 * bit_count (saved_regs_mask));
9491 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9493 if (IS_INTERRUPT (func_type))
9494 /* Interrupt handlers will have pushed the
9495 IP onto the stack, so restore it now. */
9496 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9498 else
9500 /* Restore stack pointer if necessary. */
9501 if (offsets->outgoing_args != offsets->saved_regs)
9503 operands[0] = operands[1] = stack_pointer_rtx;
9504 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9505 output_add_immediate (operands);
9508 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9510 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9511 if (regs_ever_live[reg] && !call_used_regs[reg])
9512 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9513 reg, SP_REGNUM);
9515 else
9517 start_reg = FIRST_FPA_REGNUM;
9519 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9521 if (regs_ever_live[reg] && !call_used_regs[reg])
9523 if (reg - start_reg == 3)
9525 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9526 start_reg, SP_REGNUM);
9527 start_reg = reg + 1;
9530 else
9532 if (reg != start_reg)
9533 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9534 start_reg, reg - start_reg,
9535 SP_REGNUM);
9537 start_reg = reg + 1;
9541 /* Just in case the last register checked also needs unstacking. */
9542 if (reg != start_reg)
9543 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9544 start_reg, reg - start_reg, SP_REGNUM);
9547 if (TARGET_HARD_FLOAT && TARGET_VFP)
9549 start_reg = FIRST_VFP_REGNUM;
9550 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9552 if ((!regs_ever_live[reg] || call_used_regs[reg])
9553 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9555 if (start_reg != reg)
9556 arm_output_fldmx (f, SP_REGNUM,
9557 (start_reg - FIRST_VFP_REGNUM) / 2,
9558 (reg - start_reg) / 2);
9559 start_reg = reg + 2;
9562 if (start_reg != reg)
9563 arm_output_fldmx (f, SP_REGNUM,
9564 (start_reg - FIRST_VFP_REGNUM) / 2,
9565 (reg - start_reg) / 2);
9567 if (TARGET_IWMMXT)
9568 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9569 if (regs_ever_live[reg] && !call_used_regs[reg])
9570 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9572 /* If we can, restore the LR into the PC. */
9573 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9574 && really_return
9575 && current_function_pretend_args_size == 0
9576 && saved_regs_mask & (1 << LR_REGNUM)
9577 && !current_function_calls_eh_return)
9579 saved_regs_mask &= ~ (1 << LR_REGNUM);
9580 saved_regs_mask |= (1 << PC_REGNUM);
9583 /* Load the registers off the stack. If we only have one register
9584 to load use the LDR instruction - it is faster. */
9585 if (saved_regs_mask == (1 << LR_REGNUM))
9587 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9589 else if (saved_regs_mask)
9591 if (saved_regs_mask & (1 << SP_REGNUM))
9592 /* Note - write back to the stack register is not enabled
9593 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9594 in the list of registers and if we add writeback the
9595 instruction becomes UNPREDICTABLE. */
9596 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9597 else
9598 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9601 if (current_function_pretend_args_size)
9603 /* Unwind the pre-pushed regs. */
9604 operands[0] = operands[1] = stack_pointer_rtx;
9605 operands[2] = GEN_INT (current_function_pretend_args_size);
9606 output_add_immediate (operands);
9610 /* We may have already restored PC directly from the stack. */
9611 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9612 return "";
9614 /* Stack adjustment for exception handler. */
9615 if (current_function_calls_eh_return)
9616 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9617 ARM_EH_STACKADJ_REGNUM);
9619 /* Generate the return instruction. */
9620 switch ((int) ARM_FUNC_TYPE (func_type))
9622 case ARM_FT_ISR:
9623 case ARM_FT_FIQ:
9624 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9625 break;
9627 case ARM_FT_EXCEPTION:
9628 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9629 break;
9631 case ARM_FT_INTERWORKED:
9632 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9633 break;
9635 default:
9636 if (arm_arch5 || arm_arch4t)
9637 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9638 else
9639 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9640 break;
9643 return "";
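/* Illustrative example (not in the original source): for a function
   without a frame pointer that pushed {r4, lr} and allocated 8 bytes of
   locals, the code above emits roughly
	add	sp, sp, #8
	ldmfd	sp!, {r4, pc}
   i.e. the stack adjustment from output_add_immediate followed by a
   single pop that returns by loading the saved LR into the PC.  */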
9646 static void
9647 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9648 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9650 arm_stack_offsets *offsets;
9652 if (TARGET_THUMB)
9654 int regno;
9656 /* Emit any call-via-reg trampolines that are needed for v4t support
9657 of call_reg and call_value_reg type insns. */
9658 for (regno = 0; regno < SP_REGNUM; regno++)
9660 rtx label = cfun->machine->call_via[regno];
9662 if (label != NULL)
9664 function_section (current_function_decl);
9665 targetm.asm_out.internal_label (asm_out_file, "L",
9666 CODE_LABEL_NUMBER (label));
9667 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
9671 /* ??? Probably not safe to set this here, since it assumes that a
9672 function will be emitted as assembly immediately after we generate
9673 RTL for it. This does not happen for inline functions. */
9674 return_used_this_function = 0;
9676 else
9678 /* We need to take into account any stack-frame rounding. */
9679 offsets = arm_get_frame_offsets ();
9681 if (use_return_insn (FALSE, NULL)
9682 && return_used_this_function
9683 && offsets->saved_regs != offsets->outgoing_args
9684 && !frame_pointer_needed)
9685 abort ();
9687 /* Reset the ARM-specific per-function variables. */
9688 after_arm_reorg = 0;
9692 /* Generate and emit an insn that we will recognize as a push_multi.
9693 Unfortunately, since this insn does not reflect very well the actual
9694 semantics of the operation, we need to annotate the insn for the benefit
9695 of DWARF2 frame unwind information. */
9696 static rtx
9697 emit_multi_reg_push (int mask)
9699 int num_regs = 0;
9700 int num_dwarf_regs;
9701 int i, j;
9702 rtx par;
9703 rtx dwarf;
9704 int dwarf_par_index;
9705 rtx tmp, reg;
9707 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9708 if (mask & (1 << i))
9709 num_regs++;
9711 if (num_regs == 0 || num_regs > 16)
9712 abort ();
9714 /* We don't record the PC in the dwarf frame information. */
9715 num_dwarf_regs = num_regs;
9716 if (mask & (1 << PC_REGNUM))
9717 num_dwarf_regs--;
9719 /* For the body of the insn we are going to generate an UNSPEC in
9720 parallel with several USEs. This allows the insn to be recognized
9721 by the push_multi pattern in the arm.md file. The insn looks
9722 something like this:
9724 (parallel [
9725 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9726 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9727 (use (reg:SI 11 fp))
9728 (use (reg:SI 12 ip))
9729 (use (reg:SI 14 lr))
9730 (use (reg:SI 15 pc))
9733 For the frame note however, we try to be more explicit and actually
9734 show each register being stored into the stack frame, plus a (single)
9735 decrement of the stack pointer. We do it this way in order to be
9736 friendly to the stack unwinding code, which only wants to see a single
9737 stack decrement per instruction. The RTL we generate for the note looks
9738 something like this:
9740 (sequence [
9741 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9742 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9743 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9744 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9745 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9748 This sequence is used both by the code to support stack unwinding for
9749 exceptions handlers and the code to generate dwarf2 frame debugging. */
9751 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9752 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9753 dwarf_par_index = 1;
9755 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9757 if (mask & (1 << i))
9759 reg = gen_rtx_REG (SImode, i);
9761 XVECEXP (par, 0, 0)
9762 = gen_rtx_SET (VOIDmode,
9763 gen_rtx_MEM (BLKmode,
9764 gen_rtx_PRE_DEC (BLKmode,
9765 stack_pointer_rtx)),
9766 gen_rtx_UNSPEC (BLKmode,
9767 gen_rtvec (1, reg),
9768 UNSPEC_PUSH_MULT));
9770 if (i != PC_REGNUM)
9772 tmp = gen_rtx_SET (VOIDmode,
9773 gen_rtx_MEM (SImode, stack_pointer_rtx),
9774 reg);
9775 RTX_FRAME_RELATED_P (tmp) = 1;
9776 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9777 dwarf_par_index++;
9780 break;
9784 for (j = 1, i++; j < num_regs; i++)
9786 if (mask & (1 << i))
9788 reg = gen_rtx_REG (SImode, i);
9790 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9792 if (i != PC_REGNUM)
9794 tmp = gen_rtx_SET (VOIDmode,
9795 gen_rtx_MEM (SImode,
9796 plus_constant (stack_pointer_rtx,
9797 4 * j)),
9798 reg);
9799 RTX_FRAME_RELATED_P (tmp) = 1;
9800 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9803 j++;
9807 par = emit_insn (par);
9809 tmp = gen_rtx_SET (SImode,
9810 stack_pointer_rtx,
9811 gen_rtx_PLUS (SImode,
9812 stack_pointer_rtx,
9813 GEN_INT (-4 * num_regs)));
9814 RTX_FRAME_RELATED_P (tmp) = 1;
9815 XVECEXP (dwarf, 0, 0) = tmp;
9817 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9818 REG_NOTES (par));
9819 return par;
9822 static rtx
9823 emit_sfm (int base_reg, int count)
9825 rtx par;
9826 rtx dwarf;
9827 rtx tmp, reg;
9828 int i;
9830 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9831 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9833 reg = gen_rtx_REG (XFmode, base_reg++);
9835 XVECEXP (par, 0, 0)
9836 = gen_rtx_SET (VOIDmode,
9837 gen_rtx_MEM (BLKmode,
9838 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9839 gen_rtx_UNSPEC (BLKmode,
9840 gen_rtvec (1, reg),
9841 UNSPEC_PUSH_MULT));
9842 tmp = gen_rtx_SET (VOIDmode,
9843 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9844 RTX_FRAME_RELATED_P (tmp) = 1;
9845 XVECEXP (dwarf, 0, 1) = tmp;
9847 for (i = 1; i < count; i++)
9849 reg = gen_rtx_REG (XFmode, base_reg++);
9850 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9852 tmp = gen_rtx_SET (VOIDmode,
9853 gen_rtx_MEM (XFmode,
9854 plus_constant (stack_pointer_rtx,
9855 i * 12)),
9856 reg);
9857 RTX_FRAME_RELATED_P (tmp) = 1;
9858 XVECEXP (dwarf, 0, i + 1) = tmp;
9861 tmp = gen_rtx_SET (VOIDmode,
9862 stack_pointer_rtx,
9863 gen_rtx_PLUS (SImode,
9864 stack_pointer_rtx,
9865 GEN_INT (-12 * count)));
9866 RTX_FRAME_RELATED_P (tmp) = 1;
9867 XVECEXP (dwarf, 0, 0) = tmp;
9869 par = emit_insn (par);
9870 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9871 REG_NOTES (par));
9872 return par;
9876 /* Return true if the current function needs to save/restore LR. */
9878 static bool
9879 thumb_force_lr_save (void)
9881 return !cfun->machine->lr_save_eliminated
9882 && (!leaf_function_p ()
9883 || thumb_far_jump_used_p ()
9884 || regs_ever_live [LR_REGNUM]);
9888 /* Compute the distance from register FROM to register TO.
9889 These can be the arg pointer (26), the soft frame pointer (25),
9890 the stack pointer (13) or the hard frame pointer (11).
9891 In thumb mode r7 is used as the soft frame pointer, if needed.
9892 Typical stack layout looks like this:
9894 old stack pointer -> | |
9895 ----
9896 | | \
9897 | | saved arguments for
9898 | | vararg functions
9899 | | /
9901 hard FP & arg pointer -> | | \
9902 | | stack
9903 | | frame
9904 | | /
9906 | | \
9907 | | call saved
9908 | | registers
9909 soft frame pointer -> | | /
9911 | | \
9912 | | local
9913 | | variables
9914 | | /
9916 | | \
9917 | | outgoing
9918 | | arguments
9919 current stack pointer -> | | /
9922 For a given function some or all of these stack components
9923 may not be needed, giving rise to the possibility of
9924 eliminating some of the registers.
9926 The values returned by this function must reflect the behavior
9927 of arm_expand_prologue() and arm_compute_save_reg_mask().
9929 The sign of the number returned reflects the direction of stack
9930 growth, so the values are positive for all eliminations except
9931 from the soft frame pointer to the hard frame pointer.
9933 SFP may point just inside the local variables block to ensure correct
9934 alignment. */
9937 /* Calculate stack offsets. These are used to calculate register elimination
9938 offsets and in prologue/epilogue code. */
9940 static arm_stack_offsets *
9941 arm_get_frame_offsets (void)
9943 struct arm_stack_offsets *offsets;
9944 unsigned long func_type;
9945 int leaf;
9946 int saved;
9947 HOST_WIDE_INT frame_size;
9949 offsets = &cfun->machine->stack_offsets;
9951 /* We need to know if we are a leaf function. Unfortunately, it
9952 is possible to be called after start_sequence has been called,
9953 which causes get_insns to return the insns for the sequence,
9954 not the function, which will cause leaf_function_p to return
9955 the incorrect result.  However, we only need
9957 to know about leaf functions once reload has completed, and the
9958 frame size cannot be changed after that time, so we can safely
9959 use the cached value. */
9961 if (reload_completed)
9962 return offsets;
9964 /* Initially this is the size of the local variables.  It will be translated
9965 into an offset once we have determined the size of preceding data. */
9966 frame_size = ROUND_UP_WORD (get_frame_size ());
9968 leaf = leaf_function_p ();
9970 /* Space for variadic functions. */
9971 offsets->saved_args = current_function_pretend_args_size;
9973 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9975 if (TARGET_ARM)
9977 unsigned int regno;
9979 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9981 /* We know that SP will be doubleword aligned on entry, and we must
9982 preserve that condition at any subroutine call. We also require the
9983 soft frame pointer to be doubleword aligned. */
9985 if (TARGET_REALLY_IWMMXT)
9987 /* Check for the call-saved iWMMXt registers. */
9988 for (regno = FIRST_IWMMXT_REGNUM;
9989 regno <= LAST_IWMMXT_REGNUM;
9990 regno++)
9991 if (regs_ever_live [regno] && ! call_used_regs [regno])
9992 saved += 8;
9995 func_type = arm_current_func_type ();
9996 if (! IS_VOLATILE (func_type))
9998 /* Space for saved FPA registers. */
9999 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10000 if (regs_ever_live[regno] && ! call_used_regs[regno])
10001 saved += 12;
10003 /* Space for saved VFP registers. */
10004 if (TARGET_HARD_FLOAT && TARGET_VFP)
10005 saved += arm_get_vfp_saved_size ();
10008 else /* TARGET_THUMB */
10010 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10011 if (TARGET_BACKTRACE)
10012 saved += 16;
10015 /* Saved registers include the stack frame. */
10016 offsets->saved_regs = offsets->saved_args + saved;
10017 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
10018 /* A leaf function does not need any stack alignment if it has nothing
10019 on the stack. */
10020 if (leaf && frame_size == 0)
10022 offsets->outgoing_args = offsets->soft_frame;
10023 return offsets;
10026 /* Ensure SFP has the correct alignment. */
10027 if (ARM_DOUBLEWORD_ALIGN
10028 && (offsets->soft_frame & 7))
10029 offsets->soft_frame += 4;
10031 offsets->outgoing_args = offsets->soft_frame + frame_size
10032 + current_function_outgoing_args_size;
10034 if (ARM_DOUBLEWORD_ALIGN)
10036 /* Ensure SP remains doubleword aligned. */
10037 if (offsets->outgoing_args & 7)
10038 offsets->outgoing_args += 4;
10039 if (offsets->outgoing_args & 7)
10040 abort ();
10043 return offsets;
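/* Illustrative example (not in the original source, and assuming
   CALLER_INTERWORKING_SLOT_SIZE is zero): an ARM function with no
   pretend args that saves {r4, r5, lr} and has 8 bytes of locals and no
   outgoing arguments gets
	saved_args    = 0
	frame         = 0	(no frame pointer)
	saved_regs    = 12
	soft_frame    = 12, padded to 16 when ARM_DOUBLEWORD_ALIGN
	outgoing_args = 24	(16 + 8, already doubleword aligned).  */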
10047 /* Calculate the relative offsets for the different stack pointers. Positive
10048 offsets are in the direction of stack growth. */
10050 HOST_WIDE_INT
10051 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10053 arm_stack_offsets *offsets;
10055 offsets = arm_get_frame_offsets ();
10057 /* OK, now we have enough information to compute the distances.
10058 There must be an entry in these switch tables for each pair
10059 of registers in ELIMINABLE_REGS, even if some of the entries
10060 seem to be redundant or useless. */
10061 switch (from)
10063 case ARG_POINTER_REGNUM:
10064 switch (to)
10066 case THUMB_HARD_FRAME_POINTER_REGNUM:
10067 return 0;
10069 case FRAME_POINTER_REGNUM:
10070 /* This is the reverse of the soft frame pointer
10071 to hard frame pointer elimination below. */
10072 return offsets->soft_frame - offsets->saved_args;
10074 case ARM_HARD_FRAME_POINTER_REGNUM:
10075 /* If there is no stack frame then the hard
10076 frame pointer and the arg pointer coincide. */
10077 if (offsets->frame == offsets->saved_regs)
10078 return 0;
10079 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10080 return (frame_pointer_needed
10081 && cfun->static_chain_decl != NULL
10082 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10084 case STACK_POINTER_REGNUM:
10085 /* If nothing has been pushed on the stack at all
10086 then this will return -4. This *is* correct! */
10087 return offsets->outgoing_args - (offsets->saved_args + 4);
10089 default:
10090 abort ();
10092 break;
10094 case FRAME_POINTER_REGNUM:
10095 switch (to)
10097 case THUMB_HARD_FRAME_POINTER_REGNUM:
10098 return 0;
10100 case ARM_HARD_FRAME_POINTER_REGNUM:
10101 /* The hard frame pointer points to the top entry in the
10102 stack frame.  The soft frame pointer points to the bottom entry
10103 in the stack frame. If there is no stack frame at all,
10104 then they are identical. */
10106 return offsets->frame - offsets->soft_frame;
10108 case STACK_POINTER_REGNUM:
10109 return offsets->outgoing_args - offsets->soft_frame;
10111 default:
10112 abort ();
10114 break;
10116 default:
10117 /* You cannot eliminate from the stack pointer.
10118 In theory you could eliminate from the hard frame
10119 pointer to the stack pointer, but this will never
10120 happen, since if a stack frame is not needed the
10121 hard frame pointer will never be used. */
10122 abort ();
10127 /* Generate the prologue instructions for entry into an ARM function. */
10128 void
10129 arm_expand_prologue (void)
10131 int reg;
10132 rtx amount;
10133 rtx insn;
10134 rtx ip_rtx;
10135 unsigned long live_regs_mask;
10136 unsigned long func_type;
10137 int fp_offset = 0;
10138 int saved_pretend_args = 0;
10139 int saved_regs = 0;
10140 unsigned HOST_WIDE_INT args_to_push;
10141 arm_stack_offsets *offsets;
10143 func_type = arm_current_func_type ();
10145 /* Naked functions don't have prologues. */
10146 if (IS_NAKED (func_type))
10147 return;
10149 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10150 args_to_push = current_function_pretend_args_size;
10152 /* Compute which registers we will have to save onto the stack.  */
10153 live_regs_mask = arm_compute_save_reg_mask ();
10155 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10157 if (frame_pointer_needed)
10159 if (IS_INTERRUPT (func_type))
10161 /* Interrupt functions must not corrupt any registers.
10162 Creating a frame pointer, however, corrupts the IP
10163 register, so we must push it first. */
10164 insn = emit_multi_reg_push (1 << IP_REGNUM);
10166 /* Do not set RTX_FRAME_RELATED_P on this insn.
10167 The dwarf stack unwinding code only wants to see one
10168 stack decrement per function, and this is not it. If
10169 this instruction is labeled as being part of the frame
10170 creation sequence then dwarf2out_frame_debug_expr will
10171 abort when it encounters the assignment of IP to FP
10172 later on, since the use of SP here establishes SP as
10173 the CFA register and not IP.
10175 Anyway this instruction is not really part of the stack
10176 frame creation although it is part of the prologue. */
10178 else if (IS_NESTED (func_type))
10180 /* The static chain register is the same as the IP register
10181 used as a scratch register during stack frame creation.
10182 To get around this, we need to find somewhere to store IP
10183 whilst the frame is being created. We try the following
10184 places in order:
10186 1. The last argument register.
10187 2. A slot on the stack above the frame. (This only
10188 works if the function is not a varargs function).
10189 3. Register r3, after pushing the argument registers
10190 onto the stack.
10192 Note - we only need to tell the dwarf2 backend about the SP
10193 adjustment in the second variant; the static chain register
10194 doesn't need to be unwound, as it doesn't contain a value
10195 inherited from the caller. */
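/* For illustration: in the second case below the prologue stores IP with
   roughly "str ip, [sp, #-4]!", and the static chain is recovered later
   with roughly "ldr ip, [fp, #4]" once the new frame pointer has been
   established.  */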
10197 if (regs_ever_live[3] == 0)
10199 insn = gen_rtx_REG (SImode, 3);
10200 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10201 insn = emit_insn (insn);
10203 else if (args_to_push == 0)
10205 rtx dwarf;
10206 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10207 insn = gen_rtx_MEM (SImode, insn);
10208 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10209 insn = emit_insn (insn);
10211 fp_offset = 4;
10213 /* Just tell the dwarf backend that we adjusted SP. */
10214 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10215 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10216 GEN_INT (-fp_offset)));
10217 RTX_FRAME_RELATED_P (insn) = 1;
10218 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10219 dwarf, REG_NOTES (insn));
10221 else
10223 /* Store the args on the stack. */
10224 if (cfun->machine->uses_anonymous_args)
10225 insn = emit_multi_reg_push
10226 ((0xf0 >> (args_to_push / 4)) & 0xf);
10227 else
10228 insn = emit_insn
10229 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10230 GEN_INT (- args_to_push)));
10232 RTX_FRAME_RELATED_P (insn) = 1;
10234 saved_pretend_args = 1;
10235 fp_offset = args_to_push;
10236 args_to_push = 0;
10238 /* Now reuse r3 to preserve IP. */
10239 insn = gen_rtx_REG (SImode, 3);
10240 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10241 (void) emit_insn (insn);
10245 if (fp_offset)
10247 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10248 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10250 else
10251 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10253 insn = emit_insn (insn);
10254 RTX_FRAME_RELATED_P (insn) = 1;
10257 if (args_to_push)
10259 /* Push the argument registers, or reserve space for them. */
10260 if (cfun->machine->uses_anonymous_args)
10261 insn = emit_multi_reg_push
10262 ((0xf0 >> (args_to_push / 4)) & 0xf);
10263 else
10264 insn = emit_insn
10265 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10266 GEN_INT (- args_to_push)));
10267 RTX_FRAME_RELATED_P (insn) = 1;
10270 /* If this is an interrupt service routine, and the link register
10271 is going to be pushed, and we are not creating a stack frame
10272 (which would involve an extra push of IP and a pop in the epilogue),
10273 then subtracting four from LR now means that the function return
10274 can be done with a single instruction. */
10275 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10276 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10277 && ! frame_pointer_needed)
10278 emit_insn (gen_rtx_SET (SImode,
10279 gen_rtx_REG (SImode, LR_REGNUM),
10280 gen_rtx_PLUS (SImode,
10281 gen_rtx_REG (SImode, LR_REGNUM),
10282 GEN_INT (-4))));
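/* With LR pre-adjusted like this the epilogue can, for example, return with
   a single "ldmfd sp!, {..., pc}^" instead of having to reload LR and then
   issue a separate "subs pc, lr, #4".  */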
10284 if (live_regs_mask)
10286 insn = emit_multi_reg_push (live_regs_mask);
10287 saved_regs += bit_count (live_regs_mask) * 4;
10288 RTX_FRAME_RELATED_P (insn) = 1;
10291 if (TARGET_IWMMXT)
10292 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10293 if (regs_ever_live[reg] && ! call_used_regs [reg])
10295 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10296 insn = gen_rtx_MEM (V2SImode, insn);
10297 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10298 gen_rtx_REG (V2SImode, reg)));
10299 RTX_FRAME_RELATED_P (insn) = 1;
10300 saved_regs += 8;
10303 if (! IS_VOLATILE (func_type))
10305 int start_reg;
10307 /* Save any floating point call-saved registers used by this
10308 function. */
10309 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10311 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10312 if (regs_ever_live[reg] && !call_used_regs[reg])
10314 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10315 insn = gen_rtx_MEM (XFmode, insn);
10316 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10317 gen_rtx_REG (XFmode, reg)));
10318 RTX_FRAME_RELATED_P (insn) = 1;
10319 saved_regs += 12;
10322 else
10324 start_reg = LAST_FPA_REGNUM;
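/* A single SFM can store up to four consecutive FPA registers, so scan
   downwards collecting runs of live call-saved registers and emit one
   store-multiple per run, flushing a run as soon as it reaches four
   registers or is broken by a register that does not need saving.  */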
10326 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10328 if (regs_ever_live[reg] && !call_used_regs[reg])
10330 if (start_reg - reg == 3)
10332 insn = emit_sfm (reg, 4);
10333 RTX_FRAME_RELATED_P (insn) = 1;
10334 saved_regs += 48;
10335 start_reg = reg - 1;
10338 else
10340 if (start_reg != reg)
10342 insn = emit_sfm (reg + 1, start_reg - reg);
10343 RTX_FRAME_RELATED_P (insn) = 1;
10344 saved_regs += (start_reg - reg) * 12;
10346 start_reg = reg - 1;
10350 if (start_reg != reg)
10352 insn = emit_sfm (reg + 1, start_reg - reg);
10353 saved_regs += (start_reg - reg) * 12;
10354 RTX_FRAME_RELATED_P (insn) = 1;
10357 if (TARGET_HARD_FLOAT && TARGET_VFP)
10359 start_reg = FIRST_VFP_REGNUM;
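/* vfp_emit_fstmx stores a run of consecutive double-precision register
   pairs with one FSTMX, so walk the VFP registers two at a time, flush
   the accumulated run whenever a pair that does not need saving is found,
   and flush once more at the end for any run still pending.  */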
10361 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10363 if ((!regs_ever_live[reg] || call_used_regs[reg])
10364 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10366 if (start_reg != reg)
10367 saved_regs += vfp_emit_fstmx (start_reg,
10368 (reg - start_reg) / 2);
10369 start_reg = reg + 2;
10372 if (start_reg != reg)
10373 saved_regs += vfp_emit_fstmx (start_reg,
10374 (reg - start_reg) / 2);
10378 if (frame_pointer_needed)
10380 /* Create the new frame pointer. */
10381 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10382 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10383 RTX_FRAME_RELATED_P (insn) = 1;
10385 if (IS_NESTED (func_type))
10387 /* Recover the static chain register. */
10388 if (regs_ever_live [3] == 0
10389 || saved_pretend_args)
10390 insn = gen_rtx_REG (SImode, 3);
10391 else /* if (current_function_pretend_args_size == 0) */
10393 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10394 GEN_INT (4));
10395 insn = gen_rtx_MEM (SImode, insn);
10398 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10399 /* Add a USE to stop propagate_one_insn() from barfing. */
10400 emit_insn (gen_prologue_use (ip_rtx));
10404 offsets = arm_get_frame_offsets ();
10405 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10407 /* This add can produce multiple insns for a large constant, so we
10408 need to get tricky. */
10409 rtx last = get_last_insn ();
10411 amount = GEN_INT (offsets->saved_args + saved_regs
10412 - offsets->outgoing_args);
10414 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10415 amount));
10416 do
10418 last = last ? NEXT_INSN (last) : get_insns ();
10419 RTX_FRAME_RELATED_P (last) = 1;
10421 while (last != insn);
10423 /* If the frame pointer is needed, emit a special barrier that
10424 will prevent the scheduler from moving stores to the frame
10425 before the stack adjustment. */
10426 if (frame_pointer_needed)
10427 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10428 hard_frame_pointer_rtx));
10432 if (flag_pic)
10433 arm_load_pic_register (INVALID_REGNUM);
10435 /* If we are profiling, make sure no instructions are scheduled before
10436 the call to mcount. Similarly if the user has requested no
10437 scheduling in the prolog. */
10438 if (current_function_profile || TARGET_NO_SCHED_PRO)
10439 emit_insn (gen_blockage ());
10441 /* If the link register is being kept alive, with the return address in it,
10442 then make sure that it does not get reused by the ce2 pass. */
10443 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10445 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10446 cfun->machine->lr_save_eliminated = 1;
10450 /* If CODE is 'd', then X is a condition operand and the instruction
10451 should only be executed if the condition is true.
10452 If CODE is 'D', then X is a condition operand and the instruction
10453 should only be executed if the condition is false: however, if the mode
10454 of the comparison is CCFPEmode, then always execute the instruction -- we
10455 do this because in these circumstances !GE does not necessarily imply LT;
10456 in these cases the instruction pattern will take care to make sure that
10457 an instruction containing %d will follow, thereby undoing the effects of
10458 doing this instruction unconditionally.
10459 If CODE is 'N' then X is a floating point operand that must be negated
10460 before output.
10461 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10462 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
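/* For example, with X = (eq (reg) (const_int 0)) the 'd' code prints "eq"
   while 'D' prints the inverse condition, "ne".  */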
10463 void
10464 arm_print_operand (FILE *stream, rtx x, int code)
10466 switch (code)
10468 case '@':
10469 fputs (ASM_COMMENT_START, stream);
10470 return;
10472 case '_':
10473 fputs (user_label_prefix, stream);
10474 return;
10476 case '|':
10477 fputs (REGISTER_PREFIX, stream);
10478 return;
10480 case '?':
10481 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10483 if (TARGET_THUMB)
10485 output_operand_lossage ("predicated Thumb instruction");
10486 break;
10488 if (current_insn_predicate != NULL)
10490 output_operand_lossage
10491 ("predicated instruction in conditional sequence");
10492 break;
10495 fputs (arm_condition_codes[arm_current_cc], stream);
10497 else if (current_insn_predicate)
10499 enum arm_cond_code code;
10501 if (TARGET_THUMB)
10503 output_operand_lossage ("predicated Thumb instruction");
10504 break;
10507 code = get_arm_condition_code (current_insn_predicate);
10508 fputs (arm_condition_codes[code], stream);
10510 return;
10512 case 'N':
10514 REAL_VALUE_TYPE r;
10515 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10516 r = REAL_VALUE_NEGATE (r);
10517 fprintf (stream, "%s", fp_const_from_val (&r));
10519 return;
10521 case 'B':
10522 if (GET_CODE (x) == CONST_INT)
10524 HOST_WIDE_INT val;
10525 val = ARM_SIGN_EXTEND (~INTVAL (x));
10526 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10528 else
10530 putc ('~', stream);
10531 output_addr_const (stream, x);
10533 return;
10535 case 'i':
10536 fprintf (stream, "%s", arithmetic_instr (x, 1));
10537 return;
10539 /* Truncate Cirrus shift counts. */
10540 case 's':
10541 if (GET_CODE (x) == CONST_INT)
10543 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10544 return;
10546 arm_print_operand (stream, x, 0);
10547 return;
10549 case 'I':
10550 fprintf (stream, "%s", arithmetic_instr (x, 0));
10551 return;
10553 case 'S':
10555 HOST_WIDE_INT val;
10556 const char * shift = shift_op (x, &val);
10558 if (shift)
10560 fprintf (stream, ", %s ", shift_op (x, &val));
10561 if (val == -1)
10562 arm_print_operand (stream, XEXP (x, 1), 0);
10563 else
10564 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10567 return;
10569 /* An explanation of the 'Q', 'R' and 'H' register operands:
10571 In a pair of registers containing a DI or DF value the 'Q'
10572 operand returns the register number of the register containing
10573 the least significant part of the value. The 'R' operand returns
10574 the register number of the register containing the most
10575 significant part of the value.
10577 The 'H' operand returns the higher of the two register numbers.
10578 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10579 same as the 'Q' operand, since the most significant part of the
10580 value is held in the lower number register. The reverse is true
10581 on systems where WORDS_BIG_ENDIAN is false.
10583 The purpose of these operands is to distinguish between cases
10584 where the endian-ness of the values is important (for example
10585 when they are added together), and cases where the endian-ness
10586 is irrelevant, but the order of register operations is important.
10587 For example when loading a value from memory into a register
10588 pair, the endian-ness does not matter. Provided that the value
10589 from the lower memory address is put into the lower numbered
10590 register, and the value from the higher address is put into the
10591 higher numbered register, the load will work regardless of whether
10592 the value being loaded is big-wordian or little-wordian. The
10593 order of the two register loads can matter however, if the address
10594 of the memory location is actually held in one of the registers
10595 being overwritten by the load. */
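/* For example, for a DImode value held in {r0, r1} on a target where
   WORDS_BIG_ENDIAN is false, 'Q' prints r0 while 'R' and 'H' both print
   r1; when WORDS_BIG_ENDIAN is true, 'Q' and 'H' both print r1 and 'R'
   prints r0.  */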
10596 case 'Q':
10597 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10599 output_operand_lossage ("invalid operand for code '%c'", code);
10600 return;
10603 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10604 return;
10606 case 'R':
10607 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10609 output_operand_lossage ("invalid operand for code '%c'", code);
10610 return;
10613 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10614 return;
10616 case 'H':
10617 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
10619 output_operand_lossage ("invalid operand for code '%c'", code);
10620 return;
10623 asm_fprintf (stream, "%r", REGNO (x) + 1);
10624 return;
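/* Print the register used in the address of a memory operand: either the
   address register itself or, for an auto-modified address such as a
   post-increment, the register inside it.  Used together with 'M' when
   emitting ldm/stm style instructions.  */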
10626 case 'm':
10627 asm_fprintf (stream, "%r",
10628 GET_CODE (XEXP (x, 0)) == REG
10629 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10630 return;
10632 case 'M':
10633 asm_fprintf (stream, "{%r-%r}",
10634 REGNO (x),
10635 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10636 return;
10638 case 'd':
10639 /* CONST_TRUE_RTX means always -- that's the default. */
10640 if (x == const_true_rtx)
10641 return;
10643 if (!COMPARISON_P (x))
10645 output_operand_lossage ("invalid operand for code '%c'", code);
10646 return;
10649 fputs (arm_condition_codes[get_arm_condition_code (x)],
10650 stream);
10651 return;
10653 case 'D':
10654 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10655 want to do that. */
10656 if (x == const_true_rtx)
10658 output_operand_lossage ("instruction never executed");
10659 return;
10661 if (!COMPARISON_P (x))
10663 output_operand_lossage ("invalid operand for code '%c'", code);
10664 return;
10667 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10668 (get_arm_condition_code (x))],
10669 stream);
10670 return;
10672 /* Cirrus registers can be accessed in a variety of ways:
10673 single floating point (f)
10674 double floating point (d)
10675 32bit integer (fx)
10676 64bit integer (dx). */
10677 case 'W': /* Cirrus register in F mode. */
10678 case 'X': /* Cirrus register in D mode. */
10679 case 'Y': /* Cirrus register in FX mode. */
10680 case 'Z': /* Cirrus register in DX mode. */
10681 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10682 abort ();
10684 fprintf (stream, "mv%s%s",
10685 code == 'W' ? "f"
10686 : code == 'X' ? "d"
10687 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10689 return;
10691 /* Print a Cirrus register in the mode specified by the operand's mode. */
10692 case 'V':
10694 int mode = GET_MODE (x);
10696 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10698 output_operand_lossage ("invalid operand for code '%c'", code);
10699 return;
10702 fprintf (stream, "mv%s%s",
10703 mode == DFmode ? "d"
10704 : mode == SImode ? "fx"
10705 : mode == DImode ? "dx"
10706 : "f", reg_names[REGNO (x)] + 2);
10708 return;
10711 case 'U':
10712 if (GET_CODE (x) != REG
10713 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10714 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10715 /* Bad value for wCG register number. */
10717 output_operand_lossage ("invalid operand for code '%c'", code);
10718 return;
10721 else
10722 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10723 return;
10725 /* Print an iWMMXt control register name. */
10726 case 'w':
10727 if (GET_CODE (x) != CONST_INT
10728 || INTVAL (x) < 0
10729 || INTVAL (x) >= 16)
10730 /* Bad value for wC register number. */
10732 output_operand_lossage ("invalid operand for code '%c'", code);
10733 return;
10736 else
10738 static const char * wc_reg_names [16] =
10740 "wCID", "wCon", "wCSSF", "wCASF",
10741 "wC4", "wC5", "wC6", "wC7",
10742 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10743 "wC12", "wC13", "wC14", "wC15"
10746 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10748 return;
10750 /* Print a VFP double precision register name. */
10751 case 'P':
10753 int mode = GET_MODE (x);
10754 int num;
10756 if (mode != DImode && mode != DFmode)
10758 output_operand_lossage ("invalid operand for code '%c'", code);
10759 return;
10762 if (GET_CODE (x) != REG
10763 || !IS_VFP_REGNUM (REGNO (x)))
10765 output_operand_lossage ("invalid operand for code '%c'", code);
10766 return;
10769 num = REGNO(x) - FIRST_VFP_REGNUM;
10770 if (num & 1)
10772 output_operand_lossage ("invalid operand for code '%c'", code);
10773 return;
10776 fprintf (stream, "d%d", num >> 1);
10778 return;
10780 default:
10781 if (x == 0)
10783 output_operand_lossage ("missing operand");
10784 return;
10787 if (GET_CODE (x) == REG)
10788 asm_fprintf (stream, "%r", REGNO (x));
10789 else if (GET_CODE (x) == MEM)
10791 output_memory_reference_mode = GET_MODE (x);
10792 output_address (XEXP (x, 0));
10794 else if (GET_CODE (x) == CONST_DOUBLE)
10795 fprintf (stream, "#%s", fp_immediate_constant (x));
10796 else if (GET_CODE (x) == NEG)
10797 abort (); /* This should never happen now. */
10798 else
10800 fputc ('#', stream);
10801 output_addr_const (stream, x);
10806 #ifndef AOF_ASSEMBLER
10807 /* Target hook for assembling integer objects. The ARM version needs to
10808 handle word-sized values specially. */
10809 static bool
10810 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10812 if (size == UNITS_PER_WORD && aligned_p)
10814 fputs ("\t.word\t", asm_out_file);
10815 output_addr_const (asm_out_file, x);
10817 /* Mark symbols as position independent. We only do this in the
10818 .text segment, not in the .data segment. */
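/* So local symbols, constant pool entries and labels come out roughly as
   ".word sym(GOTOFF)", while other symbols come out as ".word sym(GOT)".  */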
10819 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10820 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10822 if (GET_CODE (x) == SYMBOL_REF
10823 && (CONSTANT_POOL_ADDRESS_P (x)
10824 || SYMBOL_REF_LOCAL_P (x)))
10825 fputs ("(GOTOFF)", asm_out_file);
10826 else if (GET_CODE (x) == LABEL_REF)
10827 fputs ("(GOTOFF)", asm_out_file);
10828 else
10829 fputs ("(GOT)", asm_out_file);
10831 fputc ('\n', asm_out_file);
10832 return true;
10835 if (arm_vector_mode_supported_p (GET_MODE (x)))
10837 int i, units;
10839 if (GET_CODE (x) != CONST_VECTOR)
10840 abort ();
10842 units = CONST_VECTOR_NUNITS (x);
10844 switch (GET_MODE (x))
10846 case V2SImode: size = 4; break;
10847 case V4HImode: size = 2; break;
10848 case V8QImode: size = 1; break;
10849 default:
10850 abort ();
10853 for (i = 0; i < units; i++)
10855 rtx elt;
10857 elt = CONST_VECTOR_ELT (x, i);
10858 assemble_integer
10859 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10862 return true;
10865 return default_assemble_integer (x, size, aligned_p);
10867 #endif
10869 /* A finite state machine takes care of noticing whether or not instructions
10870 can be conditionally executed, and thus decrease execution time and code
10871 size by deleting branch instructions. The fsm is controlled by
10872 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10874 /* The states of the fsm controlling condition codes are:
10875 0: normal, do nothing special
10876 1: make ASM_OUTPUT_OPCODE not output this instruction
10877 2: make ASM_OUTPUT_OPCODE not output this instruction
10878 3: make instructions conditional
10879 4: make instructions conditional
10881 State transitions (state->state by whom under condition):
10882 0 -> 1 final_prescan_insn if the `target' is a label
10883 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10884 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10885 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10886 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10887 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10888 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10889 (the target insn is arm_target_insn).
10891 If the jump clobbers the conditions then we use states 2 and 4.
10893 A similar thing can be done with conditional return insns.
10895 XXX In case the `target' is an unconditional branch, this conditionalising
10896 of the instructions always reduces code size, but not always execution
10897 time. But then, I want to reduce the code size to somewhere near what
10898 /bin/cc produces. */
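/* As a rough example of the effect, a sequence such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   has its branch suppressed and the skipped insn emitted predicated on the
   inverse condition instead:

	cmp	r0, #0
	addne	r1, r1, #1  */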
10900 /* Returns the index of the ARM condition code string in
10901 `arm_condition_codes'. COMPARISON should be an rtx like
10902 `(eq (...) (...))'. */
10903 static enum arm_cond_code
10904 get_arm_condition_code (rtx comparison)
10906 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10907 int code;
10908 enum rtx_code comp_code = GET_CODE (comparison);
10910 if (GET_MODE_CLASS (mode) != MODE_CC)
10911 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10912 XEXP (comparison, 1));
10914 switch (mode)
10916 case CC_DNEmode: code = ARM_NE; goto dominance;
10917 case CC_DEQmode: code = ARM_EQ; goto dominance;
10918 case CC_DGEmode: code = ARM_GE; goto dominance;
10919 case CC_DGTmode: code = ARM_GT; goto dominance;
10920 case CC_DLEmode: code = ARM_LE; goto dominance;
10921 case CC_DLTmode: code = ARM_LT; goto dominance;
10922 case CC_DGEUmode: code = ARM_CS; goto dominance;
10923 case CC_DGTUmode: code = ARM_HI; goto dominance;
10924 case CC_DLEUmode: code = ARM_LS; goto dominance;
10925 case CC_DLTUmode: code = ARM_CC;
10927 dominance:
10928 if (comp_code != EQ && comp_code != NE)
10929 abort ();
10931 if (comp_code == EQ)
10932 return ARM_INVERSE_CONDITION_CODE (code);
10933 return code;
10935 case CC_NOOVmode:
10936 switch (comp_code)
10938 case NE: return ARM_NE;
10939 case EQ: return ARM_EQ;
10940 case GE: return ARM_PL;
10941 case LT: return ARM_MI;
10942 default: abort ();
10945 case CC_Zmode:
10946 switch (comp_code)
10948 case NE: return ARM_NE;
10949 case EQ: return ARM_EQ;
10950 default: abort ();
10953 case CC_Nmode:
10954 switch (comp_code)
10956 case NE: return ARM_MI;
10957 case EQ: return ARM_PL;
10958 default: abort ();
10961 case CCFPEmode:
10962 case CCFPmode:
10963 /* These encodings assume that AC=1 in the FPA system control
10964 byte. This allows us to handle all cases except UNEQ and
10965 LTGT. */
10966 switch (comp_code)
10968 case GE: return ARM_GE;
10969 case GT: return ARM_GT;
10970 case LE: return ARM_LS;
10971 case LT: return ARM_MI;
10972 case NE: return ARM_NE;
10973 case EQ: return ARM_EQ;
10974 case ORDERED: return ARM_VC;
10975 case UNORDERED: return ARM_VS;
10976 case UNLT: return ARM_LT;
10977 case UNLE: return ARM_LE;
10978 case UNGT: return ARM_HI;
10979 case UNGE: return ARM_PL;
10980 /* UNEQ and LTGT do not have a representation. */
10981 case UNEQ: /* Fall through. */
10982 case LTGT: /* Fall through. */
10983 default: abort ();
10986 case CC_SWPmode:
10987 switch (comp_code)
10989 case NE: return ARM_NE;
10990 case EQ: return ARM_EQ;
10991 case GE: return ARM_LE;
10992 case GT: return ARM_LT;
10993 case LE: return ARM_GE;
10994 case LT: return ARM_GT;
10995 case GEU: return ARM_LS;
10996 case GTU: return ARM_CC;
10997 case LEU: return ARM_CS;
10998 case LTU: return ARM_HI;
10999 default: abort ();
11002 case CC_Cmode:
11003 switch (comp_code)
11005 case LTU: return ARM_CS;
11006 case GEU: return ARM_CC;
11007 default: abort ();
11010 case CCmode:
11011 switch (comp_code)
11013 case NE: return ARM_NE;
11014 case EQ: return ARM_EQ;
11015 case GE: return ARM_GE;
11016 case GT: return ARM_GT;
11017 case LE: return ARM_LE;
11018 case LT: return ARM_LT;
11019 case GEU: return ARM_CS;
11020 case GTU: return ARM_HI;
11021 case LEU: return ARM_LS;
11022 case LTU: return ARM_CC;
11023 default: abort ();
11026 default: abort ();
11029 abort ();
11032 void
11033 arm_final_prescan_insn (rtx insn)
11035 /* BODY will hold the body of INSN. */
11036 rtx body = PATTERN (insn);
11038 /* This will be 1 if trying to repeat the trick, and things need to be
11039 reversed if it appears to fail. */
11040 int reverse = 0;
11042 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11043 if a branch is taken, even if the rtl suggests otherwise. It also
11044 means that we have to grub around within the jump expression to find
11045 out what the conditions are when the jump isn't taken. */
11046 int jump_clobbers = 0;
11048 /* If we start with a return insn, we only succeed if we find another one. */
11049 int seeking_return = 0;
11051 /* START_INSN will hold the insn from where we start looking. This is the
11052 first insn after the following code_label if REVERSE is true. */
11053 rtx start_insn = insn;
11055 /* If in state 4, check if the target branch is reached, in order to
11056 change back to state 0. */
11057 if (arm_ccfsm_state == 4)
11059 if (insn == arm_target_insn)
11061 arm_target_insn = NULL;
11062 arm_ccfsm_state = 0;
11064 return;
11067 /* If in state 3, it is possible to repeat the trick, if this insn is an
11068 unconditional branch to a label, and immediately following this branch
11069 is the previous target label which is only used once, and the label this
11070 branch jumps to is not too far off. */
11071 if (arm_ccfsm_state == 3)
11073 if (simplejump_p (insn))
11075 start_insn = next_nonnote_insn (start_insn);
11076 if (GET_CODE (start_insn) == BARRIER)
11078 /* XXX Isn't this always a barrier? */
11079 start_insn = next_nonnote_insn (start_insn);
11081 if (GET_CODE (start_insn) == CODE_LABEL
11082 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11083 && LABEL_NUSES (start_insn) == 1)
11084 reverse = TRUE;
11085 else
11086 return;
11088 else if (GET_CODE (body) == RETURN)
11090 start_insn = next_nonnote_insn (start_insn);
11091 if (GET_CODE (start_insn) == BARRIER)
11092 start_insn = next_nonnote_insn (start_insn);
11093 if (GET_CODE (start_insn) == CODE_LABEL
11094 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11095 && LABEL_NUSES (start_insn) == 1)
11097 reverse = TRUE;
11098 seeking_return = 1;
11100 else
11101 return;
11103 else
11104 return;
11107 if (arm_ccfsm_state != 0 && !reverse)
11108 abort ();
11109 if (GET_CODE (insn) != JUMP_INSN)
11110 return;
11112 /* This jump might be paralleled with a clobber of the condition codes;
11113 the jump should always come first. */
11114 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11115 body = XVECEXP (body, 0, 0);
11117 if (reverse
11118 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11119 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11121 int insns_skipped;
11122 int fail = FALSE, succeed = FALSE;
11123 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11124 int then_not_else = TRUE;
11125 rtx this_insn = start_insn, label = 0;
11127 /* If the jump cannot be done with one instruction, we cannot
11128 conditionally execute the instruction in the inverse case. */
11129 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11131 jump_clobbers = 1;
11132 return;
11135 /* Register the insn jumped to. */
11136 if (reverse)
11138 if (!seeking_return)
11139 label = XEXP (SET_SRC (body), 0);
11141 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11142 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11143 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11145 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11146 then_not_else = FALSE;
11148 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11149 seeking_return = 1;
11150 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11152 seeking_return = 1;
11153 then_not_else = FALSE;
11155 else
11156 abort ();
11158 /* See how many insns this branch skips, and what kind of insns. If all
11159 insns are okay, and the label or unconditional branch to the same
11160 label is not too far away, succeed. */
11161 for (insns_skipped = 0;
11162 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11164 rtx scanbody;
11166 this_insn = next_nonnote_insn (this_insn);
11167 if (!this_insn)
11168 break;
11170 switch (GET_CODE (this_insn))
11172 case CODE_LABEL:
11173 /* Succeed if it is the target label, otherwise fail since
11174 control falls in from somewhere else. */
11175 if (this_insn == label)
11177 if (jump_clobbers)
11179 arm_ccfsm_state = 2;
11180 this_insn = next_nonnote_insn (this_insn);
11182 else
11183 arm_ccfsm_state = 1;
11184 succeed = TRUE;
11186 else
11187 fail = TRUE;
11188 break;
11190 case BARRIER:
11191 /* Succeed if the following insn is the target label.
11192 Otherwise fail.
11193 If return insns are used then the last insn in a function
11194 will be a barrier. */
11195 this_insn = next_nonnote_insn (this_insn);
11196 if (this_insn && this_insn == label)
11198 if (jump_clobbers)
11200 arm_ccfsm_state = 2;
11201 this_insn = next_nonnote_insn (this_insn);
11203 else
11204 arm_ccfsm_state = 1;
11205 succeed = TRUE;
11207 else
11208 fail = TRUE;
11209 break;
11211 case CALL_INSN:
11212 /* The AAPCS says that conditional calls should not be
11213 used since they make interworking inefficient (the
11214 linker can't transform BL<cond> into BLX). That's
11215 only a problem if the machine has BLX. */
11216 if (arm_arch5)
11218 fail = TRUE;
11219 break;
11222 /* Succeed if the following insn is the target label, or
11223 if the following two insns are a barrier and the
11224 target label. */
11225 this_insn = next_nonnote_insn (this_insn);
11226 if (this_insn && GET_CODE (this_insn) == BARRIER)
11227 this_insn = next_nonnote_insn (this_insn);
11229 if (this_insn && this_insn == label
11230 && insns_skipped < max_insns_skipped)
11232 if (jump_clobbers)
11234 arm_ccfsm_state = 2;
11235 this_insn = next_nonnote_insn (this_insn);
11237 else
11238 arm_ccfsm_state = 1;
11239 succeed = TRUE;
11241 else
11242 fail = TRUE;
11243 break;
11245 case JUMP_INSN:
11246 /* If this is an unconditional branch to the same label, succeed.
11247 If it is to another label, do nothing. If it is conditional,
11248 fail. */
11249 /* XXX Probably, the tests for SET and the PC are
11250 unnecessary. */
11252 scanbody = PATTERN (this_insn);
11253 if (GET_CODE (scanbody) == SET
11254 && GET_CODE (SET_DEST (scanbody)) == PC)
11256 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11257 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11259 arm_ccfsm_state = 2;
11260 succeed = TRUE;
11262 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11263 fail = TRUE;
11265 /* Fail if a conditional return is undesirable (e.g. on a
11266 StrongARM), but still allow this if optimizing for size. */
11267 else if (GET_CODE (scanbody) == RETURN
11268 && !use_return_insn (TRUE, NULL)
11269 && !optimize_size)
11270 fail = TRUE;
11271 else if (GET_CODE (scanbody) == RETURN
11272 && seeking_return)
11274 arm_ccfsm_state = 2;
11275 succeed = TRUE;
11277 else if (GET_CODE (scanbody) == PARALLEL)
11279 switch (get_attr_conds (this_insn))
11281 case CONDS_NOCOND:
11282 break;
11283 default:
11284 fail = TRUE;
11285 break;
11288 else
11289 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11291 break;
11293 case INSN:
11294 /* Instructions using or affecting the condition codes make it
11295 fail. */
11296 scanbody = PATTERN (this_insn);
11297 if (!(GET_CODE (scanbody) == SET
11298 || GET_CODE (scanbody) == PARALLEL)
11299 || get_attr_conds (this_insn) != CONDS_NOCOND)
11300 fail = TRUE;
11302 /* A conditional Cirrus instruction must be followed by
11303 a non-Cirrus instruction. However, since we
11304 conditionalize instructions in this function, and since
11305 by the time we get here we can no longer add
11306 instructions (nops), because shorten_branches() has
11307 already been called, we simply disable conditionalizing
11308 Cirrus instructions to be safe. */
11309 if (GET_CODE (scanbody) != USE
11310 && GET_CODE (scanbody) != CLOBBER
11311 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11312 fail = TRUE;
11313 break;
11315 default:
11316 break;
11319 if (succeed)
11321 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11322 arm_target_label = CODE_LABEL_NUMBER (label);
11323 else if (seeking_return || arm_ccfsm_state == 2)
11325 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11327 this_insn = next_nonnote_insn (this_insn);
11328 if (this_insn && (GET_CODE (this_insn) == BARRIER
11329 || GET_CODE (this_insn) == CODE_LABEL))
11330 abort ();
11332 if (!this_insn)
11334 /* Oh, dear! We ran off the end... give up. */
11335 recog (PATTERN (insn), insn, NULL);
11336 arm_ccfsm_state = 0;
11337 arm_target_insn = NULL;
11338 return;
11340 arm_target_insn = this_insn;
11342 else
11343 abort ();
11344 if (jump_clobbers)
11346 if (reverse)
11347 abort ();
11348 arm_current_cc =
11349 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11350 0), 0), 1));
11351 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11352 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11353 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11354 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11356 else
11358 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11359 what it was. */
11360 if (!reverse)
11361 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11362 0));
11365 if (reverse || then_not_else)
11366 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11369 /* Restore recog_data (getting the attributes of other insns can
11370 destroy this array, but final.c assumes that it remains intact
11371 across this call; since the insn has been recognized already we
11372 call recog direct). */
11373 recog (PATTERN (insn), insn, NULL);
11377 /* Returns true if REGNO is a valid register
11378 for holding a quantity of type MODE. */
11379 int
11380 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11382 if (GET_MODE_CLASS (mode) == MODE_CC)
11383 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11385 if (TARGET_THUMB)
11386 /* For the Thumb we only allow values bigger than SImode in
11387 registers 0 - 6, so that there is always a second low
11388 register available to hold the upper part of the value.
11389 We probably ought to ensure that the register is the
11390 start of an even numbered register pair. */
11391 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11393 if (IS_CIRRUS_REGNUM (regno))
11394 /* We have outlawed SI values in Cirrus registers because they
11395 reside in the lower 32 bits, but SF values reside in the
11396 upper 32 bits. This causes gcc all sorts of grief. We can't
11397 even split the registers into pairs because Cirrus SI values
11398 get sign extended to 64bits-- aldyh. */
11399 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11401 if (IS_VFP_REGNUM (regno))
11403 if (mode == SFmode || mode == SImode)
11404 return TRUE;
11406 /* DFmode values are only valid in even register pairs. */
11407 if (mode == DFmode)
11408 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11409 return FALSE;
11412 if (IS_IWMMXT_GR_REGNUM (regno))
11413 return mode == SImode;
11415 if (IS_IWMMXT_REGNUM (regno))
11416 return VALID_IWMMXT_REG_MODE (mode);
11418 /* We allow any value to be stored in the general registers.
11419 Restrict doubleword quantities to even register pairs so that we can
11420 use ldrd. */
11421 if (regno <= LAST_ARM_REGNUM)
11422 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
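/* So, for example, when ldrd/strd are available a DImode value may live in
   {r0, r1} or {r2, r3} but not in {r1, r2}.  */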
11424 if ( regno == FRAME_POINTER_REGNUM
11425 || regno == ARG_POINTER_REGNUM)
11426 /* We only allow integers in the fake hard registers. */
11427 return GET_MODE_CLASS (mode) == MODE_INT;
11429 /* The only registers left are the FPA registers
11430 which we only allow to hold FP values. */
11431 return GET_MODE_CLASS (mode) == MODE_FLOAT
11432 && regno >= FIRST_FPA_REGNUM
11433 && regno <= LAST_FPA_REGNUM;
11436 int
11437 arm_regno_class (int regno)
11439 if (TARGET_THUMB)
11441 if (regno == STACK_POINTER_REGNUM)
11442 return STACK_REG;
11443 if (regno == CC_REGNUM)
11444 return CC_REG;
11445 if (regno < 8)
11446 return LO_REGS;
11447 return HI_REGS;
11450 if ( regno <= LAST_ARM_REGNUM
11451 || regno == FRAME_POINTER_REGNUM
11452 || regno == ARG_POINTER_REGNUM)
11453 return GENERAL_REGS;
11455 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11456 return NO_REGS;
11458 if (IS_CIRRUS_REGNUM (regno))
11459 return CIRRUS_REGS;
11461 if (IS_VFP_REGNUM (regno))
11462 return VFP_REGS;
11464 if (IS_IWMMXT_REGNUM (regno))
11465 return IWMMXT_REGS;
11467 if (IS_IWMMXT_GR_REGNUM (regno))
11468 return IWMMXT_GR_REGS;
11470 return FPA_REGS;
11473 /* Handle a special case when computing the offset
11474 of an argument from the frame pointer. */
11475 int
11476 arm_debugger_arg_offset (int value, rtx addr)
11478 rtx insn;
11480 /* We are only interested if dbxout_parms() failed to compute the offset. */
11481 if (value != 0)
11482 return 0;
11484 /* We can only cope with the case where the address is held in a register. */
11485 if (GET_CODE (addr) != REG)
11486 return 0;
11488 /* If we are using the frame pointer to point at the argument, then
11489 an offset of 0 is correct. */
11490 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11491 return 0;
11493 /* If we are using the stack pointer to point at the
11494 argument, then an offset of 0 is correct. */
11495 if ((TARGET_THUMB || !frame_pointer_needed)
11496 && REGNO (addr) == SP_REGNUM)
11497 return 0;
11499 /* Oh dear. The argument is pointed to by a register rather
11500 than being held in a register, or being stored at a known
11501 offset from the frame pointer. Since GDB only understands
11502 those two kinds of argument we must translate the address
11503 held in the register into an offset from the frame pointer.
11504 We do this by searching through the insns for the function
11505 looking to see where this register gets its value. If the
11506 register is initialized from the frame pointer plus an offset
11507 then we are in luck and we can continue, otherwise we give up.
11509 This code is exercised by producing debugging information
11510 for a function with arguments like this:
11512 double func (double a, double b, int c, double d) {return d;}
11514 Without this code the stab for parameter 'd' will be set to
11515 an offset of 0 from the frame pointer, rather than 8. */
11517 /* The if() statement says:
11519 If the insn is a normal instruction
11520 and if the insn is setting the value in a register
11521 and if the register being set is the register holding the address of the argument
11522 and if the address is computed by an addition
11523 that involves adding to a register
11524 which is the frame pointer
11525 a constant integer
11527 then... */
11529 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11531 if ( GET_CODE (insn) == INSN
11532 && GET_CODE (PATTERN (insn)) == SET
11533 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11534 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11535 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11536 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11537 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11540 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11542 break;
11546 if (value == 0)
11548 debug_rtx (addr);
11549 warning ("unable to compute real location of stacked parameter");
11550 value = 8; /* XXX magic hack */
11553 return value;
11556 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11557 do \
11559 if ((MASK) & insn_flags) \
11560 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11561 BUILT_IN_MD, NULL, NULL_TREE); \
11563 while (0)
11565 struct builtin_description
11567 const unsigned int mask;
11568 const enum insn_code icode;
11569 const char * const name;
11570 const enum arm_builtins code;
11571 const enum rtx_code comparison;
11572 const unsigned int flag;
11575 static const struct builtin_description bdesc_2arg[] =
11577 #define IWMMXT_BUILTIN(code, string, builtin) \
11578 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11579 ARM_BUILTIN_##builtin, 0, 0 },
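/* So the first entry below, for instance, expands to
   { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
     ARM_BUILTIN_WADDB, 0, 0 },  */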
11581 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11582 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11583 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11584 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11585 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11586 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11587 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11588 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11589 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11590 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11591 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11592 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11593 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11594 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11595 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11596 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11597 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11598 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11599 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11600 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11601 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11602 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11603 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11604 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11605 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11606 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11607 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11608 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11609 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11610 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11611 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11612 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11613 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11614 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11615 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11616 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11617 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11618 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11619 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11620 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11621 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11622 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11623 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11624 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11625 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11626 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11627 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11628 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11629 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11630 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11631 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11632 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11633 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11634 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11635 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11636 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11637 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11638 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11640 #define IWMMXT_BUILTIN2(code, builtin) \
11641 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11643 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11644 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11645 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11646 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11647 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11648 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11649 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11650 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11651 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11652 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11653 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11654 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11655 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11656 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11657 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11658 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11659 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11660 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11661 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11662 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11663 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11664 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11665 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11666 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11667 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11668 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11669 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11670 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11671 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11672 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11673 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11674 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11677 static const struct builtin_description bdesc_1arg[] =
11679 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11680 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11681 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11682 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11683 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11684 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11685 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11686 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11687 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11688 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11689 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11690 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11691 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11692 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11693 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11694 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11695 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11696 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11699 /* Set up all the iWMMXt builtins. This is
11700 not called if TARGET_IWMMXT is zero. */
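/* A rough sketch of how one of these builtins might be used from user code
   (illustrative only; assumes iWMMXt is enabled and a suitable vector type):

     typedef int v2si __attribute__ ((vector_size (8)));

     v2si
     add_pairs (v2si a, v2si b)
     {
       return __builtin_arm_waddw (a, b);
     }
*/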
11702 static void
11703 arm_init_iwmmxt_builtins (void)
11705 const struct builtin_description * d;
11706 size_t i;
11707 tree endlink = void_list_node;
11709 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11710 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11711 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11713 tree int_ftype_int
11714 = build_function_type (integer_type_node,
11715 tree_cons (NULL_TREE, integer_type_node, endlink));
11716 tree v8qi_ftype_v8qi_v8qi_int
11717 = build_function_type (V8QI_type_node,
11718 tree_cons (NULL_TREE, V8QI_type_node,
11719 tree_cons (NULL_TREE, V8QI_type_node,
11720 tree_cons (NULL_TREE,
11721 integer_type_node,
11722 endlink))));
11723 tree v4hi_ftype_v4hi_int
11724 = build_function_type (V4HI_type_node,
11725 tree_cons (NULL_TREE, V4HI_type_node,
11726 tree_cons (NULL_TREE, integer_type_node,
11727 endlink)));
11728 tree v2si_ftype_v2si_int
11729 = build_function_type (V2SI_type_node,
11730 tree_cons (NULL_TREE, V2SI_type_node,
11731 tree_cons (NULL_TREE, integer_type_node,
11732 endlink)));
11733 tree v2si_ftype_di_di
11734 = build_function_type (V2SI_type_node,
11735 tree_cons (NULL_TREE, long_long_integer_type_node,
11736 tree_cons (NULL_TREE, long_long_integer_type_node,
11737 endlink)));
11738 tree di_ftype_di_int
11739 = build_function_type (long_long_integer_type_node,
11740 tree_cons (NULL_TREE, long_long_integer_type_node,
11741 tree_cons (NULL_TREE, integer_type_node,
11742 endlink)));
11743 tree di_ftype_di_int_int
11744 = build_function_type (long_long_integer_type_node,
11745 tree_cons (NULL_TREE, long_long_integer_type_node,
11746 tree_cons (NULL_TREE, integer_type_node,
11747 tree_cons (NULL_TREE,
11748 integer_type_node,
11749 endlink))));
11750 tree int_ftype_v8qi
11751 = build_function_type (integer_type_node,
11752 tree_cons (NULL_TREE, V8QI_type_node,
11753 endlink));
11754 tree int_ftype_v4hi
11755 = build_function_type (integer_type_node,
11756 tree_cons (NULL_TREE, V4HI_type_node,
11757 endlink));
11758 tree int_ftype_v2si
11759 = build_function_type (integer_type_node,
11760 tree_cons (NULL_TREE, V2SI_type_node,
11761 endlink));
11762 tree int_ftype_v8qi_int
11763 = build_function_type (integer_type_node,
11764 tree_cons (NULL_TREE, V8QI_type_node,
11765 tree_cons (NULL_TREE, integer_type_node,
11766 endlink)));
11767 tree int_ftype_v4hi_int
11768 = build_function_type (integer_type_node,
11769 tree_cons (NULL_TREE, V4HI_type_node,
11770 tree_cons (NULL_TREE, integer_type_node,
11771 endlink)));
11772 tree int_ftype_v2si_int
11773 = build_function_type (integer_type_node,
11774 tree_cons (NULL_TREE, V2SI_type_node,
11775 tree_cons (NULL_TREE, integer_type_node,
11776 endlink)));
11777 tree v8qi_ftype_v8qi_int_int
11778 = build_function_type (V8QI_type_node,
11779 tree_cons (NULL_TREE, V8QI_type_node,
11780 tree_cons (NULL_TREE, integer_type_node,
11781 tree_cons (NULL_TREE,
11782 integer_type_node,
11783 endlink))));
11784 tree v4hi_ftype_v4hi_int_int
11785 = build_function_type (V4HI_type_node,
11786 tree_cons (NULL_TREE, V4HI_type_node,
11787 tree_cons (NULL_TREE, integer_type_node,
11788 tree_cons (NULL_TREE,
11789 integer_type_node,
11790 endlink))));
11791 tree v2si_ftype_v2si_int_int
11792 = build_function_type (V2SI_type_node,
11793 tree_cons (NULL_TREE, V2SI_type_node,
11794 tree_cons (NULL_TREE, integer_type_node,
11795 tree_cons (NULL_TREE,
11796 integer_type_node,
11797 endlink))));
11798 /* Miscellaneous. */
11799 tree v8qi_ftype_v4hi_v4hi
11800 = build_function_type (V8QI_type_node,
11801 tree_cons (NULL_TREE, V4HI_type_node,
11802 tree_cons (NULL_TREE, V4HI_type_node,
11803 endlink)));
11804 tree v4hi_ftype_v2si_v2si
11805 = build_function_type (V4HI_type_node,
11806 tree_cons (NULL_TREE, V2SI_type_node,
11807 tree_cons (NULL_TREE, V2SI_type_node,
11808 endlink)));
11809 tree v2si_ftype_v4hi_v4hi
11810 = build_function_type (V2SI_type_node,
11811 tree_cons (NULL_TREE, V4HI_type_node,
11812 tree_cons (NULL_TREE, V4HI_type_node,
11813 endlink)));
11814 tree v2si_ftype_v8qi_v8qi
11815 = build_function_type (V2SI_type_node,
11816 tree_cons (NULL_TREE, V8QI_type_node,
11817 tree_cons (NULL_TREE, V8QI_type_node,
11818 endlink)));
11819 tree v4hi_ftype_v4hi_di
11820 = build_function_type (V4HI_type_node,
11821 tree_cons (NULL_TREE, V4HI_type_node,
11822 tree_cons (NULL_TREE,
11823 long_long_integer_type_node,
11824 endlink)));
11825 tree v2si_ftype_v2si_di
11826 = build_function_type (V2SI_type_node,
11827 tree_cons (NULL_TREE, V2SI_type_node,
11828 tree_cons (NULL_TREE,
11829 long_long_integer_type_node,
11830 endlink)));
11831 tree void_ftype_int_int
11832 = build_function_type (void_type_node,
11833 tree_cons (NULL_TREE, integer_type_node,
11834 tree_cons (NULL_TREE, integer_type_node,
11835 endlink)));
11836 tree di_ftype_void
11837 = build_function_type (long_long_unsigned_type_node, endlink);
11838 tree di_ftype_v8qi
11839 = build_function_type (long_long_integer_type_node,
11840 tree_cons (NULL_TREE, V8QI_type_node,
11841 endlink));
11842 tree di_ftype_v4hi
11843 = build_function_type (long_long_integer_type_node,
11844 tree_cons (NULL_TREE, V4HI_type_node,
11845 endlink));
11846 tree di_ftype_v2si
11847 = build_function_type (long_long_integer_type_node,
11848 tree_cons (NULL_TREE, V2SI_type_node,
11849 endlink));
11850 tree v2si_ftype_v4hi
11851 = build_function_type (V2SI_type_node,
11852 tree_cons (NULL_TREE, V4HI_type_node,
11853 endlink));
11854 tree v4hi_ftype_v8qi
11855 = build_function_type (V4HI_type_node,
11856 tree_cons (NULL_TREE, V8QI_type_node,
11857 endlink));
11859 tree di_ftype_di_v4hi_v4hi
11860 = build_function_type (long_long_unsigned_type_node,
11861 tree_cons (NULL_TREE,
11862 long_long_unsigned_type_node,
11863 tree_cons (NULL_TREE, V4HI_type_node,
11864 tree_cons (NULL_TREE,
11865 V4HI_type_node,
11866 endlink))));
11868 tree di_ftype_v4hi_v4hi
11869 = build_function_type (long_long_unsigned_type_node,
11870 tree_cons (NULL_TREE, V4HI_type_node,
11871 tree_cons (NULL_TREE, V4HI_type_node,
11872 endlink)));
11874 /* Normal vector binops. */
11875 tree v8qi_ftype_v8qi_v8qi
11876 = build_function_type (V8QI_type_node,
11877 tree_cons (NULL_TREE, V8QI_type_node,
11878 tree_cons (NULL_TREE, V8QI_type_node,
11879 endlink)));
11880 tree v4hi_ftype_v4hi_v4hi
11881 = build_function_type (V4HI_type_node,
11882 tree_cons (NULL_TREE, V4HI_type_node,
11883 tree_cons (NULL_TREE, V4HI_type_node,
11884 endlink)));
11885 tree v2si_ftype_v2si_v2si
11886 = build_function_type (V2SI_type_node,
11887 tree_cons (NULL_TREE, V2SI_type_node,
11888 tree_cons (NULL_TREE, V2SI_type_node,
11889 endlink)));
11890 tree di_ftype_di_di
11891 = build_function_type (long_long_unsigned_type_node,
11892 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11893 tree_cons (NULL_TREE,
11894 long_long_unsigned_type_node,
11895 endlink)));
11897 /* Add all builtins that are more or less simple operations on two
11898 operands. */
11899 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11901 /* Use one of the operands; the target can have a different mode for
11902 mask-generating compares. */
11903 enum machine_mode mode;
11904 tree type;
11906 if (d->name == 0)
11907 continue;
11909 mode = insn_data[d->icode].operand[1].mode;
11911 switch (mode)
11913 case V8QImode:
11914 type = v8qi_ftype_v8qi_v8qi;
11915 break;
11916 case V4HImode:
11917 type = v4hi_ftype_v4hi_v4hi;
11918 break;
11919 case V2SImode:
11920 type = v2si_ftype_v2si_v2si;
11921 break;
11922 case DImode:
11923 type = di_ftype_di_di;
11924 break;
11926 default:
11927 abort ();
11930 def_mbuiltin (d->mask, d->name, type, d->code);
11933 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
11934 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11936 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11938 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11939 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11941 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11942 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11946 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11953 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12005 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
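/* Illustrative usage sketch (editorial addition, not part of arm.c): on an
   iWMMXt-enabled target the builtins registered above are reached from C
   simply by name.  The vector typedef spelling below is an assumption;
   only the builtin names and their signatures come from the tables in
   this file.

       typedef unsigned char v8qi __attribute__ ((vector_size (8)));

       long long
       sum_of_bytes (v8qi x)
       {
         long long acc = __builtin_arm_wzero ();
         return acc + __builtin_arm_waccb (x);
       }

   where __builtin_arm_wzero matches di_ftype_void and __builtin_arm_waccb
   matches di_ftype_v8qi above.  */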
12025 static void
12026 arm_init_builtins (void)
12028 if (TARGET_REALLY_IWMMXT)
12029 arm_init_iwmmxt_builtins ();
12032 /* Errors in the source file can cause expand_expr to return const0_rtx
12033 where we expect a vector. To avoid crashing, use one of the vector
12034 clear instructions. */
12036 static rtx
12037 safe_vector_operand (rtx x, enum machine_mode mode)
12039 if (x != const0_rtx)
12040 return x;
12041 x = gen_reg_rtx (mode);
12043 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12044 : gen_rtx_SUBREG (DImode, x, 0)));
12045 return x;
12048 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12050 static rtx
12051 arm_expand_binop_builtin (enum insn_code icode,
12052 tree arglist, rtx target)
12054 rtx pat;
12055 tree arg0 = TREE_VALUE (arglist);
12056 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12057 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12058 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12059 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12060 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12061 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12063 if (VECTOR_MODE_P (mode0))
12064 op0 = safe_vector_operand (op0, mode0);
12065 if (VECTOR_MODE_P (mode1))
12066 op1 = safe_vector_operand (op1, mode1);
12068 if (! target
12069 || GET_MODE (target) != tmode
12070 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12071 target = gen_reg_rtx (tmode);
12073 /* In case the insn wants input operands in modes different from
12074 the result, abort. */
12075 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12076 abort ();
12078 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12079 op0 = copy_to_mode_reg (mode0, op0);
12080 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12081 op1 = copy_to_mode_reg (mode1, op1);
12083 pat = GEN_FCN (icode) (target, op0, op1);
12084 if (! pat)
12085 return 0;
12086 emit_insn (pat);
12087 return target;
12090 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12092 static rtx
12093 arm_expand_unop_builtin (enum insn_code icode,
12094 tree arglist, rtx target, int do_load)
12096 rtx pat;
12097 tree arg0 = TREE_VALUE (arglist);
12098 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12099 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12100 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12102 if (! target
12103 || GET_MODE (target) != tmode
12104 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12105 target = gen_reg_rtx (tmode);
12106 if (do_load)
12107 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12108 else
12110 if (VECTOR_MODE_P (mode0))
12111 op0 = safe_vector_operand (op0, mode0);
12113 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12114 op0 = copy_to_mode_reg (mode0, op0);
12117 pat = GEN_FCN (icode) (target, op0);
12118 if (! pat)
12119 return 0;
12120 emit_insn (pat);
12121 return target;
12124 /* Expand an expression EXP that calls a built-in function,
12125 with result going to TARGET if that's convenient
12126 (and in mode MODE if that's convenient).
12127 SUBTARGET may be used as the target for computing one of EXP's operands.
12128 IGNORE is nonzero if the value is to be ignored. */
12130 static rtx
12131 arm_expand_builtin (tree exp,
12132 rtx target,
12133 rtx subtarget ATTRIBUTE_UNUSED,
12134 enum machine_mode mode ATTRIBUTE_UNUSED,
12135 int ignore ATTRIBUTE_UNUSED)
12137 const struct builtin_description * d;
12138 enum insn_code icode;
12139 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12140 tree arglist = TREE_OPERAND (exp, 1);
12141 tree arg0;
12142 tree arg1;
12143 tree arg2;
12144 rtx op0;
12145 rtx op1;
12146 rtx op2;
12147 rtx pat;
12148 int fcode = DECL_FUNCTION_CODE (fndecl);
12149 size_t i;
12150 enum machine_mode tmode;
12151 enum machine_mode mode0;
12152 enum machine_mode mode1;
12153 enum machine_mode mode2;
12155 switch (fcode)
12157 case ARM_BUILTIN_TEXTRMSB:
12158 case ARM_BUILTIN_TEXTRMUB:
12159 case ARM_BUILTIN_TEXTRMSH:
12160 case ARM_BUILTIN_TEXTRMUH:
12161 case ARM_BUILTIN_TEXTRMSW:
12162 case ARM_BUILTIN_TEXTRMUW:
12163 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12164 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12165 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12166 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12167 : CODE_FOR_iwmmxt_textrmw);
12169 arg0 = TREE_VALUE (arglist);
12170 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12171 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12172 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12173 tmode = insn_data[icode].operand[0].mode;
12174 mode0 = insn_data[icode].operand[1].mode;
12175 mode1 = insn_data[icode].operand[2].mode;
12177 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12178 op0 = copy_to_mode_reg (mode0, op0);
12179 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12181 /* @@@ better error message */
12182 error ("selector must be an immediate");
12183 return gen_reg_rtx (tmode);
12185 if (target == 0
12186 || GET_MODE (target) != tmode
12187 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12188 target = gen_reg_rtx (tmode);
12189 pat = GEN_FCN (icode) (target, op0, op1);
12190 if (! pat)
12191 return 0;
12192 emit_insn (pat);
12193 return target;
12195 case ARM_BUILTIN_TINSRB:
12196 case ARM_BUILTIN_TINSRH:
12197 case ARM_BUILTIN_TINSRW:
12198 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12199 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12200 : CODE_FOR_iwmmxt_tinsrw);
12201 arg0 = TREE_VALUE (arglist);
12202 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12203 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12204 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12205 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12206 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12207 tmode = insn_data[icode].operand[0].mode;
12208 mode0 = insn_data[icode].operand[1].mode;
12209 mode1 = insn_data[icode].operand[2].mode;
12210 mode2 = insn_data[icode].operand[3].mode;
12212 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12213 op0 = copy_to_mode_reg (mode0, op0);
12214 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12215 op1 = copy_to_mode_reg (mode1, op1);
12216 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12218 /* @@@ better error message */
12219 error ("selector must be an immediate");
12220 return const0_rtx;
12222 if (target == 0
12223 || GET_MODE (target) != tmode
12224 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12225 target = gen_reg_rtx (tmode);
12226 pat = GEN_FCN (icode) (target, op0, op1, op2);
12227 if (! pat)
12228 return 0;
12229 emit_insn (pat);
12230 return target;
12232 case ARM_BUILTIN_SETWCX:
12233 arg0 = TREE_VALUE (arglist);
12234 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12235 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12236 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12237 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12238 return 0;
12240 case ARM_BUILTIN_GETWCX:
12241 arg0 = TREE_VALUE (arglist);
12242 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12243 target = gen_reg_rtx (SImode);
12244 emit_insn (gen_iwmmxt_tmrc (target, op0));
12245 return target;
12247 case ARM_BUILTIN_WSHUFH:
12248 icode = CODE_FOR_iwmmxt_wshufh;
12249 arg0 = TREE_VALUE (arglist);
12250 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12251 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12252 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12253 tmode = insn_data[icode].operand[0].mode;
12254 mode1 = insn_data[icode].operand[1].mode;
12255 mode2 = insn_data[icode].operand[2].mode;
12257 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12258 op0 = copy_to_mode_reg (mode1, op0);
12259 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12261 /* @@@ better error message */
12262 error ("mask must be an immediate");
12263 return const0_rtx;
12265 if (target == 0
12266 || GET_MODE (target) != tmode
12267 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12268 target = gen_reg_rtx (tmode);
12269 pat = GEN_FCN (icode) (target, op0, op1);
12270 if (! pat)
12271 return 0;
12272 emit_insn (pat);
12273 return target;
12275 case ARM_BUILTIN_WSADB:
12276 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12277 case ARM_BUILTIN_WSADH:
12278 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12279 case ARM_BUILTIN_WSADBZ:
12280 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12281 case ARM_BUILTIN_WSADHZ:
12282 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12284 /* Several three-argument builtins. */
12285 case ARM_BUILTIN_WMACS:
12286 case ARM_BUILTIN_WMACU:
12287 case ARM_BUILTIN_WALIGN:
12288 case ARM_BUILTIN_TMIA:
12289 case ARM_BUILTIN_TMIAPH:
12290 case ARM_BUILTIN_TMIATT:
12291 case ARM_BUILTIN_TMIATB:
12292 case ARM_BUILTIN_TMIABT:
12293 case ARM_BUILTIN_TMIABB:
12294 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12295 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12296 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12297 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12298 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12299 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12300 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12301 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12302 : CODE_FOR_iwmmxt_walign);
12303 arg0 = TREE_VALUE (arglist);
12304 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12305 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12306 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12307 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12308 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12309 tmode = insn_data[icode].operand[0].mode;
12310 mode0 = insn_data[icode].operand[1].mode;
12311 mode1 = insn_data[icode].operand[2].mode;
12312 mode2 = insn_data[icode].operand[3].mode;
12314 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12315 op0 = copy_to_mode_reg (mode0, op0);
12316 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12317 op1 = copy_to_mode_reg (mode1, op1);
12318 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12319 op2 = copy_to_mode_reg (mode2, op2);
12320 if (target == 0
12321 || GET_MODE (target) != tmode
12322 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12323 target = gen_reg_rtx (tmode);
12324 pat = GEN_FCN (icode) (target, op0, op1, op2);
12325 if (! pat)
12326 return 0;
12327 emit_insn (pat);
12328 return target;
12330 case ARM_BUILTIN_WZERO:
12331 target = gen_reg_rtx (DImode);
12332 emit_insn (gen_iwmmxt_clrdi (target));
12333 return target;
12335 default:
12336 break;
12339 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12340 if (d->code == (const enum arm_builtins) fcode)
12341 return arm_expand_binop_builtin (d->icode, arglist, target);
12343 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12344 if (d->code == (const enum arm_builtins) fcode)
12345 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12347 /* @@@ Should really do something sensible here. */
12348 return NULL_RTX;
12351 /* Recursively search through all of the blocks in a function
12352 checking to see if any of the variables created in that
12353 function match the RTX called 'orig'. If they do then
12354 replace them with the RTX called 'new'. */
12355 static void
12356 replace_symbols_in_block (tree block, rtx orig, rtx new)
12358 for (; block; block = BLOCK_CHAIN (block))
12360 tree sym;
12362 if (!TREE_USED (block))
12363 continue;
12365 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12367 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12368 || DECL_IGNORED_P (sym)
12369 || TREE_CODE (sym) != VAR_DECL
12370 || DECL_EXTERNAL (sym)
12371 || !rtx_equal_p (DECL_RTL (sym), orig)
12373 continue;
12375 SET_DECL_RTL (sym, new);
12378 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12382 /* Return the number (counting from 0) of
12383 the least significant set bit in MASK. */
12385 inline static int
12386 number_of_first_bit_set (int mask)
12388 int bit;
12390 for (bit = 0;
12391 (mask & (1 << bit)) == 0;
12392 ++bit)
12393 continue;
12395 return bit;
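/* Editorial worked example: for MASK == 0x28 (binary 101000) the loop above
   stops at bit == 3, the least significant set bit; for MASK == 1 it returns
   0.  Callers must pass a nonzero MASK, otherwise the loop never
   terminates.  */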
12398 /* Generate code to return from a thumb function.
12399 If 'reg_containing_return_addr' is -1, then the return address is
12400 actually on the stack, at the stack pointer. */
12401 static void
12402 thumb_exit (FILE *f, int reg_containing_return_addr)
12404 unsigned regs_available_for_popping;
12405 unsigned regs_to_pop;
12406 int pops_needed;
12407 unsigned available;
12408 unsigned required;
12409 int mode;
12410 int size;
12411 int restore_a4 = FALSE;
12413 /* Compute the registers we need to pop. */
12414 regs_to_pop = 0;
12415 pops_needed = 0;
12417 if (reg_containing_return_addr == -1)
12419 regs_to_pop |= 1 << LR_REGNUM;
12420 ++pops_needed;
12423 if (TARGET_BACKTRACE)
12425 /* Restore the (ARM) frame pointer and stack pointer. */
12426 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12427 pops_needed += 2;
12430 /* If there is nothing to pop then just emit the BX instruction and
12431 return. */
12432 if (pops_needed == 0)
12434 if (current_function_calls_eh_return)
12435 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12437 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12438 return;
12440 /* Otherwise if we are not supporting interworking and we have not created
12441 a backtrace structure and the function was not entered in ARM mode then
12442 just pop the return address straight into the PC. */
12443 else if (!TARGET_INTERWORK
12444 && !TARGET_BACKTRACE
12445 && !is_called_in_ARM_mode (current_function_decl)
12446 && !current_function_calls_eh_return)
12448 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12449 return;
12452 /* Find out how many of the (return) argument registers we can corrupt. */
12453 regs_available_for_popping = 0;
12455 /* If returning via __builtin_eh_return, the bottom three registers
12456 all contain information needed for the return. */
12457 if (current_function_calls_eh_return)
12458 size = 12;
12459 else
12461 /* We can deduce the registers used from the function's
12462 return value. This is more reliable than examining
12463 regs_ever_live[] because that will be set if the register is
12464 ever used in the function, not just if the register is used
12465 to hold a return value. */
12467 if (current_function_return_rtx != 0)
12468 mode = GET_MODE (current_function_return_rtx);
12469 else
12470 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12472 size = GET_MODE_SIZE (mode);
12474 if (size == 0)
12476 /* In a void function we can use any argument register.
12477 In a function that returns a structure on the stack
12478 we can use the second and third argument registers. */
12479 if (mode == VOIDmode)
12480 regs_available_for_popping =
12481 (1 << ARG_REGISTER (1))
12482 | (1 << ARG_REGISTER (2))
12483 | (1 << ARG_REGISTER (3));
12484 else
12485 regs_available_for_popping =
12486 (1 << ARG_REGISTER (2))
12487 | (1 << ARG_REGISTER (3));
12489 else if (size <= 4)
12490 regs_available_for_popping =
12491 (1 << ARG_REGISTER (2))
12492 | (1 << ARG_REGISTER (3));
12493 else if (size <= 8)
12494 regs_available_for_popping =
12495 (1 << ARG_REGISTER (3));
12498 /* Match registers to be popped with registers into which we pop them. */
12499 for (available = regs_available_for_popping,
12500 required = regs_to_pop;
12501 required != 0 && available != 0;
12502 available &= ~(available & - available),
12503 required &= ~(required & - required))
12504 -- pops_needed;
12506 /* If we have any popping registers left over, remove them. */
12507 if (available > 0)
12508 regs_available_for_popping &= ~available;
12510 /* Otherwise if we need another popping register we can use
12511 the fourth argument register. */
12512 else if (pops_needed)
12514 /* If we have not found any free argument registers and
12515 reg a4 contains the return address, we must move it. */
12516 if (regs_available_for_popping == 0
12517 && reg_containing_return_addr == LAST_ARG_REGNUM)
12519 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12520 reg_containing_return_addr = LR_REGNUM;
12522 else if (size > 12)
12524 /* Register a4 is being used to hold part of the return value,
12525 but we have dire need of a free, low register. */
12526 restore_a4 = TRUE;
12528 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12531 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12533 /* The fourth argument register is available. */
12534 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12536 --pops_needed;
12540 /* Pop as many registers as we can. */
12541 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12542 regs_available_for_popping);
12544 /* Process the registers we popped. */
12545 if (reg_containing_return_addr == -1)
12547 /* The return address was popped into the lowest numbered register. */
12548 regs_to_pop &= ~(1 << LR_REGNUM);
12550 reg_containing_return_addr =
12551 number_of_first_bit_set (regs_available_for_popping);
12553 /* Remove this register from the mask of available registers, so that
12554 the return address will not be corrupted by further pops. */
12555 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12558 /* If we popped other registers then handle them here. */
12559 if (regs_available_for_popping)
12561 int frame_pointer;
12563 /* Work out which register currently contains the frame pointer. */
12564 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12566 /* Move it into the correct place. */
12567 asm_fprintf (f, "\tmov\t%r, %r\n",
12568 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12570 /* (Temporarily) remove it from the mask of popped registers. */
12571 regs_available_for_popping &= ~(1 << frame_pointer);
12572 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12574 if (regs_available_for_popping)
12576 int stack_pointer;
12578 /* We popped the stack pointer as well;
12579 find the register that contains it. */
12580 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12582 /* Move it into the stack register. */
12583 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12585 /* At this point we have popped all necessary registers, so
12586 do not worry about restoring regs_available_for_popping
12587 to its correct value:
12589 assert (pops_needed == 0)
12590 assert (regs_available_for_popping == (1 << frame_pointer))
12591 assert (regs_to_pop == (1 << STACK_POINTER)) */
12593 else
12595 /* Since we have just moved the popped value into the frame
12596 pointer, the popping register is available for reuse, and
12597 we know that we still have the stack pointer left to pop. */
12598 regs_available_for_popping |= (1 << frame_pointer);
12602 /* If we still have registers left on the stack, but we no longer have
12603 any registers into which we can pop them, then we must move the return
12604 address into the link register and make available the register that
12605 contained it. */
12606 if (regs_available_for_popping == 0 && pops_needed > 0)
12608 regs_available_for_popping |= 1 << reg_containing_return_addr;
12610 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12611 reg_containing_return_addr);
12613 reg_containing_return_addr = LR_REGNUM;
12616 /* If we have registers left on the stack then pop some more.
12617 We know that at most we will want to pop FP and SP. */
12618 if (pops_needed > 0)
12620 int popped_into;
12621 int move_to;
12623 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12624 regs_available_for_popping);
12626 /* We have popped either FP or SP.
12627 Move whichever one it is into the correct register. */
12628 popped_into = number_of_first_bit_set (regs_available_for_popping);
12629 move_to = number_of_first_bit_set (regs_to_pop);
12631 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12633 regs_to_pop &= ~(1 << move_to);
12635 --pops_needed;
12638 /* If we still have not popped everything then we must have only
12639 had one register available to us and we are now popping the SP. */
12640 if (pops_needed > 0)
12642 int popped_into;
12644 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12645 regs_available_for_popping);
12647 popped_into = number_of_first_bit_set (regs_available_for_popping);
12649 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12651 assert (regs_to_pop == (1 << STACK_POINTER))
12652 assert (pops_needed == 1)
12656 /* If necessary restore the a4 register. */
12657 if (restore_a4)
12659 if (reg_containing_return_addr != LR_REGNUM)
12661 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12662 reg_containing_return_addr = LR_REGNUM;
12665 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12668 if (current_function_calls_eh_return)
12669 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12671 /* Return to caller. */
12672 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
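/* Editorial sketch of the common case (assumed, not taken from a real
   compiler dump): for a void function built with interworking whose return
   address is on the stack, the matching loop above leaves a single low
   argument register available, so thumb_exit (f, -1) comes out as roughly

       pop  {r0}
       bx   r0

   with the pop landing in whichever argument register survives the
   return-value constraints described above.  */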
12675 /* Emit code to push or pop registers to or from the stack. F is the
12676 assembly file. MASK is the registers to push or pop. PUSH is
12677 nonzero if we should push, and zero if we should pop. For debugging
12678 output, if pushing, adjust CFA_OFFSET by the amount of space added
12679 to the stack. REAL_REGS should have the same number of bits set as
12680 MASK, and will be used instead (in the same order) to describe which
12681 registers were saved - this is used to mark the save slots when we
12682 push high registers after moving them to low registers. */
12683 static void
12684 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12686 int regno;
12687 int lo_mask = mask & 0xFF;
12688 int pushed_words = 0;
12690 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12692 /* Special case. Do not generate a POP PC statement here, do it in
12693 thumb_exit() */
12694 thumb_exit (f, -1);
12695 return;
12698 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12700 /* Look at the low registers first. */
12701 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12703 if (lo_mask & 1)
12705 asm_fprintf (f, "%r", regno);
12707 if ((lo_mask & ~1) != 0)
12708 fprintf (f, ", ");
12710 pushed_words++;
12714 if (push && (mask & (1 << LR_REGNUM)))
12716 /* Catch pushing the LR. */
12717 if (mask & 0xFF)
12718 fprintf (f, ", ");
12720 asm_fprintf (f, "%r", LR_REGNUM);
12722 pushed_words++;
12724 else if (!push && (mask & (1 << PC_REGNUM)))
12726 /* Catch popping the PC. */
12727 if (TARGET_INTERWORK || TARGET_BACKTRACE
12728 || current_function_calls_eh_return)
12730 /* The PC is never popped directly; instead
12731 it is popped into r3 and then BX is used. */
12732 fprintf (f, "}\n");
12734 thumb_exit (f, -1);
12736 return;
12738 else
12740 if (mask & 0xFF)
12741 fprintf (f, ", ");
12743 asm_fprintf (f, "%r", PC_REGNUM);
12747 fprintf (f, "}\n");
12749 if (push && pushed_words && dwarf2out_do_frame ())
12751 char *l = dwarf2out_cfi_label ();
12752 int pushed_mask = real_regs;
12754 *cfa_offset += pushed_words * 4;
12755 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12757 pushed_words = 0;
12758 pushed_mask = real_regs;
12759 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12761 if (pushed_mask & 1)
12762 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
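/* Editorial example (assumed operand values): when a prologue has moved
   r8/r9 into r2/r3 before saving them, a call such as

       thumb_pushpop (f, 0x000c, 1, &cfa_offset, 0x0300);

   prints "push {r2, r3}" but records the two save slots as belonging to
   r8 and r9 in the DWARF CFI; that is precisely what REAL_REGS exists
   for.  */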
12767 void
12768 thumb_final_prescan_insn (rtx insn)
12770 if (flag_print_asm_name)
12771 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12772 INSN_ADDRESSES (INSN_UID (insn)));
12776 int thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12778 unsigned HOST_WIDE_INT mask = 0xff;
12779 int i;
12781 if (val == 0) /* XXX */
12782 return 0;
12784 for (i = 0; i < 25; i++)
12785 if ((val & (mask << i)) == val)
12786 return 1;
12788 return 0;
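/* Editorial examples: 0x1fe00 (0xff << 9) passes the window test above and
   yields 1, while 0x101 needs nine bits and yields 0.  In other words the
   function accepts exactly the nonzero values that fit in eight contiguous
   bits whose lowest bit is no higher than bit 24.  */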
12791 /* Returns nonzero if the current function contains,
12792 or might contain a far jump. */
12793 static int
12794 thumb_far_jump_used_p (void)
12796 rtx insn;
12798 /* This test is only important for leaf functions. */
12799 /* assert (!leaf_function_p ()); */
12801 /* If we have already decided that far jumps may be used,
12802 do not bother checking again, and always return true even if
12803 it turns out that they are not being used. Once we have made
12804 the decision that far jumps are present (and that hence the link
12805 register will be pushed onto the stack) we cannot go back on it. */
12806 if (cfun->machine->far_jump_used)
12807 return 1;
12809 /* If this function is not being called from the prologue/epilogue
12810 generation code then it must be being called from the
12811 INITIAL_ELIMINATION_OFFSET macro. */
12812 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12814 /* In this case we know that we are being asked about the elimination
12815 of the arg pointer register. If that register is not being used,
12816 then there are no arguments on the stack, and we do not have to
12817 worry that a far jump might force the prologue to push the link
12818 register, changing the stack offsets. In this case we can just
12819 return false, since the presence of far jumps in the function will
12820 not affect stack offsets.
12822 If the arg pointer is live (or if it was live, but has now been
12823 eliminated and so set to dead) then we do have to test to see if
12824 the function might contain a far jump. This test can lead to some
12825 false negatives, since before reload is completed, the length of
12826 branch instructions is not known, so gcc defaults to returning their
12827 longest length, which in turn sets the far jump attribute to true.
12829 A false negative will not result in bad code being generated, but it
12830 will result in a needless push and pop of the link register. We
12831 hope that this does not occur too often.
12833 If we need doubleword stack alignment this could affect the other
12834 elimination offsets so we can't risk getting it wrong. */
12835 if (regs_ever_live [ARG_POINTER_REGNUM])
12836 cfun->machine->arg_pointer_live = 1;
12837 else if (!cfun->machine->arg_pointer_live)
12838 return 0;
12841 /* Check to see if the function contains a branch
12842 insn with the far jump attribute set. */
12843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12845 if (GET_CODE (insn) == JUMP_INSN
12846 /* Ignore tablejump patterns. */
12847 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12848 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12849 && get_attr_far_jump (insn) == FAR_JUMP_YES
12852 /* Record the fact that we have decided that
12853 the function does use far jumps. */
12854 cfun->machine->far_jump_used = 1;
12855 return 1;
12859 return 0;
12862 /* Return nonzero if FUNC must be entered in ARM mode. */
12864 int is_called_in_ARM_mode (tree func)
12866 if (TREE_CODE (func) != FUNCTION_DECL)
12867 abort ();
12869 /* Ignore the problem of functions whose address is taken. */
12870 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12871 return TRUE;
12873 #ifdef ARM_PE
12874 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12875 #else
12876 return FALSE;
12877 #endif
12880 /* The bits which aren't usefully expanded as rtl. */
12881 const char *
12882 thumb_unexpanded_epilogue (void)
12884 int regno;
12885 int live_regs_mask = 0;
12886 int high_regs_pushed = 0;
12887 int had_to_push_lr;
12888 int size;
12889 int mode;
12891 if (return_used_this_function)
12892 return "";
12894 if (IS_NAKED (arm_current_func_type ()))
12895 return "";
12897 live_regs_mask = thumb_compute_save_reg_mask ();
12898 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12900 /* We can deduce the registers used from the function's return value.
12901 This is more reliable than examining regs_ever_live[] because that
12902 will be set if the register is ever used in the function, not just if
12903 the register is used to hold a return value. */
12905 if (current_function_return_rtx != 0)
12906 mode = GET_MODE (current_function_return_rtx);
12907 else
12908 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12910 size = GET_MODE_SIZE (mode);
12912 /* The prolog may have pushed some high registers to use as
12913 work registers. e.g. the testsuite file:
12914 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12915 compiles to produce:
12916 push {r4, r5, r6, r7, lr}
12917 mov r7, r9
12918 mov r6, r8
12919 push {r6, r7}
12920 as part of the prolog. We have to undo that pushing here. */
12922 if (high_regs_pushed)
12924 int mask = live_regs_mask & 0xff;
12925 int next_hi_reg;
12927 /* The available low registers depend on the size of the value we are
12928 returning. */
12929 if (size <= 12)
12930 mask |= 1 << 3;
12931 if (size <= 8)
12932 mask |= 1 << 2;
12934 if (mask == 0)
12935 /* Oh dear! We have no low registers into which we can pop
12936 high registers! */
12937 internal_error
12938 ("no low registers available for popping high registers");
12940 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12941 if (live_regs_mask & (1 << next_hi_reg))
12942 break;
12944 while (high_regs_pushed)
12946 /* Find lo register(s) into which the high register(s) can
12947 be popped. */
12948 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12950 if (mask & (1 << regno))
12951 high_regs_pushed--;
12952 if (high_regs_pushed == 0)
12953 break;
12956 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12958 /* Pop the values into the low register(s). */
12959 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12961 /* Move the value(s) into the high registers. */
12962 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12964 if (mask & (1 << regno))
12966 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12967 regno);
12969 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12970 if (live_regs_mask & (1 << next_hi_reg))
12971 break;
12975 live_regs_mask &= ~0x0f00;
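/* Editorial sketch, continuing the complex-2.c example quoted above
   (assumed, not a real dump): undoing "mov r7, r9; mov r6, r8; push {r6, r7}"
   comes out as roughly

       pop  {r2, r3}
       mov  r8, r2
       mov  r9, r3

   where the exact low registers chosen depend on the return-value size
   handled above.  */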
12978 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12979 live_regs_mask &= 0xff;
12981 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12983 /* Pop the return address into the PC. */
12984 if (had_to_push_lr)
12985 live_regs_mask |= 1 << PC_REGNUM;
12987 /* Either no argument registers were pushed or a backtrace
12988 structure was created which includes an adjusted stack
12989 pointer, so just pop everything. */
12990 if (live_regs_mask)
12991 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12992 live_regs_mask);
12994 /* We have either just popped the return address into the
12995 PC or it was kept in LR for the entire function. */
12996 if (!had_to_push_lr)
12997 thumb_exit (asm_out_file, LR_REGNUM);
12999 else
13001 /* Pop everything but the return address. */
13002 if (live_regs_mask)
13003 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13004 live_regs_mask);
13006 if (had_to_push_lr)
13008 if (size > 12)
13010 /* We have no free low regs, so save one. */
13011 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13012 LAST_ARG_REGNUM);
13015 /* Get the return address into a temporary register. */
13016 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13017 1 << LAST_ARG_REGNUM);
13019 if (size > 12)
13021 /* Move the return address to lr. */
13022 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13023 LAST_ARG_REGNUM);
13024 /* Restore the low register. */
13025 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13026 IP_REGNUM);
13027 regno = LR_REGNUM;
13029 else
13030 regno = LAST_ARG_REGNUM;
13032 else
13033 regno = LR_REGNUM;
13035 /* Remove the argument registers that were pushed onto the stack. */
13036 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13037 SP_REGNUM, SP_REGNUM,
13038 current_function_pretend_args_size);
13040 thumb_exit (asm_out_file, regno);
13043 return "";
13046 /* Functions to save and restore machine-specific function data. */
13047 static struct machine_function *
13048 arm_init_machine_status (void)
13050 struct machine_function *machine;
13051 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13053 #if ARM_FT_UNKNOWN != 0
13054 machine->func_type = ARM_FT_UNKNOWN;
13055 #endif
13056 return machine;
13059 /* Return an RTX indicating where the return address to the
13060 calling function can be found. */
13062 rtx arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13064 if (count != 0)
13065 return NULL_RTX;
13067 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
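/* Editorial note: as a consequence, __builtin_return_address (0) reads the
   saved LR via the hard-reg initial-value machinery, while any nonzero
   count (a caller further up the stack) folds to NULL on this target.  */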
13070 /* Do anything needed before RTL is emitted for each function. */
13071 void
13072 arm_init_expanders (void)
13074 /* Arrange to initialize and mark the machine per-function status. */
13075 init_machine_status = arm_init_machine_status;
13077 /* This is to stop the combine pass optimizing away the alignment
13078 adjustment of va_arg. */
13079 /* ??? It is claimed that this should not be necessary. */
13080 if (cfun)
13081 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13085 /* Like arm_compute_initial_elimination_offset. Simpler because
13086 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13088 HOST_WIDE_INT
13089 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13091 arm_stack_offsets *offsets;
13093 offsets = arm_get_frame_offsets ();
13095 switch (from)
13097 case ARG_POINTER_REGNUM:
13098 switch (to)
13100 case STACK_POINTER_REGNUM:
13101 return offsets->outgoing_args - offsets->saved_args;
13103 case FRAME_POINTER_REGNUM:
13104 return offsets->soft_frame - offsets->saved_args;
13106 case THUMB_HARD_FRAME_POINTER_REGNUM:
13107 case ARM_HARD_FRAME_POINTER_REGNUM:
13108 return offsets->saved_regs - offsets->saved_args;
13110 default:
13111 abort();
13113 break;
13115 case FRAME_POINTER_REGNUM:
13116 switch (to)
13118 case STACK_POINTER_REGNUM:
13119 return offsets->outgoing_args - offsets->soft_frame;
13121 case THUMB_HARD_FRAME_POINTER_REGNUM:
13122 case ARM_HARD_FRAME_POINTER_REGNUM:
13123 return offsets->saved_regs - offsets->soft_frame;
13125 default:
13126 abort();
13128 break;
13130 default:
13131 abort ();
13136 /* Generate the rest of a function's prologue. */
13137 void
13138 thumb_expand_prologue (void)
13140 rtx insn, dwarf;
13142 HOST_WIDE_INT amount;
13143 arm_stack_offsets *offsets;
13144 unsigned long func_type;
13145 int regno;
13146 unsigned long live_regs_mask;
13148 func_type = arm_current_func_type ();
13150 /* Naked functions don't have prologues. */
13151 if (IS_NAKED (func_type))
13152 return;
13154 if (IS_INTERRUPT (func_type))
13156 error ("interrupt Service Routines cannot be coded in Thumb mode");
13157 return;
13160 live_regs_mask = thumb_compute_save_reg_mask ();
13161 /* Load the pic register before setting the frame pointer, so we can use r7
13162 as a temporary work register. */
13163 if (flag_pic)
13164 arm_load_pic_register (thumb_find_work_register (live_regs_mask));
13166 offsets = arm_get_frame_offsets ();
13168 if (frame_pointer_needed)
13170 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13171 stack_pointer_rtx));
13172 RTX_FRAME_RELATED_P (insn) = 1;
13174 else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
13175 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
13176 stack_pointer_rtx);
13178 amount = offsets->outgoing_args - offsets->saved_regs;
13179 if (amount)
13181 if (amount < 512)
13183 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13184 GEN_INT (- amount)));
13185 RTX_FRAME_RELATED_P (insn) = 1;
13187 else
13189 rtx reg;
13191 /* The stack decrement is too big for an immediate value in a single
13192 insn. In theory we could issue multiple subtracts, but after
13193 three of them it becomes more space efficient to place the full
13194 value in the constant pool and load into a register. (Also the
13195 ARM debugger really likes to see only one stack decrement per
13196 function). So instead we look for a scratch register into which
13197 we can load the decrement, and then we subtract this from the
13198 stack pointer. Unfortunately on the thumb the only available
13199 scratch registers are the argument registers, and we cannot use
13200 these as they may hold arguments to the function. Instead we
13201 attempt to locate a call preserved register which is used by this
13202 function. If we can find one, then we know that it will have
13203 been pushed at the start of the prologue and so we can corrupt
13204 it now. */
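/* Editorial illustration (assumed figures): for a 1024-byte frame in a
   function that already saves r4, the fallback described above amounts to
   materialising the constant -1024 in r4 (typically from the literal pool)
   followed by

       add  sp, sp, r4

   instead of a chain of immediate subtracts.  */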
13205 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13206 if (live_regs_mask & (1 << regno)
13207 && !(frame_pointer_needed
13208 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13209 break;
13211 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13213 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13215 /* Choose an arbitrary, non-argument low register. */
13216 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13218 /* Save it by copying it into a high, scratch register. */
13219 emit_insn (gen_movsi (spare, reg));
13220 /* Add a USE to stop propagate_one_insn() from barfing. */
13221 emit_insn (gen_prologue_use (spare));
13223 /* Decrement the stack. */
13224 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13225 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13226 stack_pointer_rtx, reg));
13227 RTX_FRAME_RELATED_P (insn) = 1;
13228 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13229 plus_constant (stack_pointer_rtx,
13230 -amount));
13231 RTX_FRAME_RELATED_P (dwarf) = 1;
13232 REG_NOTES (insn)
13233 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13234 REG_NOTES (insn));
13236 /* Restore the low register's original value. */
13237 emit_insn (gen_movsi (reg, spare));
13239 /* Emit a USE of the restored scratch register, so that flow
13240 analysis will not consider the restore redundant. The
13241 register won't be used again in this function and isn't
13242 restored by the epilogue. */
13243 emit_insn (gen_prologue_use (reg));
13245 else
13247 reg = gen_rtx_REG (SImode, regno);
13249 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13251 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13252 stack_pointer_rtx, reg));
13253 RTX_FRAME_RELATED_P (insn) = 1;
13254 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13255 plus_constant (stack_pointer_rtx,
13256 -amount));
13257 RTX_FRAME_RELATED_P (dwarf) = 1;
13258 REG_NOTES (insn)
13259 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13260 REG_NOTES (insn));
13263 /* If the frame pointer is needed, emit a special barrier that
13264 will prevent the scheduler from moving stores to the frame
13265 before the stack adjustment. */
13266 if (frame_pointer_needed)
13267 emit_insn (gen_stack_tie (stack_pointer_rtx,
13268 hard_frame_pointer_rtx));
13271 if (current_function_profile || TARGET_NO_SCHED_PRO)
13272 emit_insn (gen_blockage ());
13274 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13275 if (live_regs_mask & 0xff)
13276 cfun->machine->lr_save_eliminated = 0;
13278 /* If the link register is being kept alive, with the return address in it,
13279 then make sure that it does not get reused by the ce2 pass. */
13280 if (cfun->machine->lr_save_eliminated)
13281 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13285 void
13286 thumb_expand_epilogue (void)
13288 HOST_WIDE_INT amount;
13289 arm_stack_offsets *offsets;
13290 int regno;
13292 /* Naked functions don't have epilogues. */
13293 if (IS_NAKED (arm_current_func_type ()))
13294 return;
13296 offsets = arm_get_frame_offsets ();
13297 amount = offsets->outgoing_args - offsets->saved_regs;
13299 if (frame_pointer_needed)
13300 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13301 else if (amount)
13303 if (amount < 512)
13304 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13305 GEN_INT (amount)));
13306 else
13308 /* r3 is always free in the epilogue. */
13309 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13311 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13312 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13316 /* Emit a USE (stack_pointer_rtx), so that
13317 the stack adjustment will not be deleted. */
13318 emit_insn (gen_prologue_use (stack_pointer_rtx));
13320 if (current_function_profile || TARGET_NO_SCHED_PRO)
13321 emit_insn (gen_blockage ());
13323 /* Emit a clobber for each insn that will be restored in the epilogue,
13324 so that flow2 will get register lifetimes correct. */
13325 for (regno = 0; regno < 13; regno++)
13326 if (regs_ever_live[regno] && !call_used_regs[regno])
13327 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13329 if (! regs_ever_live[LR_REGNUM])
13330 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13333 static void
13334 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13336 int live_regs_mask = 0;
13337 int l_mask;
13338 int high_regs_pushed = 0;
13339 int cfa_offset = 0;
13340 int regno;
13342 if (IS_NAKED (arm_current_func_type ()))
13343 return;
13345 if (is_called_in_ARM_mode (current_function_decl))
13347 const char * name;
13349 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13350 abort ();
13351 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13352 abort ();
13353 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13355 /* Generate code sequence to switch us into Thumb mode. */
13356 /* The .code 32 directive has already been emitted by
13357 ASM_DECLARE_FUNCTION_NAME. */
13358 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13359 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13361 /* Generate a label, so that the debugger will notice the
13362 change in instruction sets. This label is also used by
13363 the assembler to bypass the ARM code when this function
13364 is called from a Thumb encoded function elsewhere in the
13365 same file. Hence the definition of STUB_NAME here must
13366 agree with the definition in gas/config/tc-arm.c. */
13368 #define STUB_NAME ".real_start_of"
13370 fprintf (f, "\t.code\t16\n");
13371 #ifdef ARM_PE
13372 if (arm_dllexport_name_p (name))
13373 name = arm_strip_name_encoding (name);
13374 #endif
13375 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13376 fprintf (f, "\t.thumb_func\n");
13377 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13380 if (current_function_pretend_args_size)
13382 if (cfun->machine->uses_anonymous_args)
13384 int num_pushes;
13386 fprintf (f, "\tpush\t{");
13388 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13390 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13391 regno <= LAST_ARG_REGNUM;
13392 regno++)
13393 asm_fprintf (f, "%r%s", regno,
13394 regno == LAST_ARG_REGNUM ? "" : ", ");
13396 fprintf (f, "}\n");
13398 else
13399 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13400 SP_REGNUM, SP_REGNUM,
13401 current_function_pretend_args_size);
13403 /* We don't need to record the stores for unwinding (would it
13404 help the debugger any if we did?), but record the change in
13405 the stack pointer. */
13406 if (dwarf2out_do_frame ())
13408 char *l = dwarf2out_cfi_label ();
13409 cfa_offset = cfa_offset + current_function_pretend_args_size;
13410 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13414 live_regs_mask = thumb_compute_save_reg_mask ();
13415 /* Just low regs and lr. */
13416 l_mask = live_regs_mask & 0x40ff;
13418 if (TARGET_BACKTRACE)
13420 int offset;
13421 int work_register;
13423 /* We have been asked to create a stack backtrace structure.
13424 The code looks like this:
13426 0 .align 2
13427 0 func:
13428 0 sub SP, #16 Reserve space for 4 registers.
13429 2 push {R7} Push low registers.
13430 4 add R7, SP, #20 Get the stack pointer before the push.
13431 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13432 8 mov R7, PC Get hold of the start of this code plus 12.
13433 10 str R7, [SP, #16] Store it.
13434 12 mov R7, FP Get hold of the current frame pointer.
13435 14 str R7, [SP, #4] Store it.
13436 16 mov R7, LR Get hold of the current return address.
13437 18 str R7, [SP, #12] Store it.
13438 20 add R7, SP, #16 Point at the start of the backtrace structure.
13439 22 mov FP, R7 Put this value into the frame pointer. */
13441 work_register = thumb_find_work_register (live_regs_mask);
13443 asm_fprintf
13444 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13445 SP_REGNUM, SP_REGNUM);
13447 if (dwarf2out_do_frame ())
13449 char *l = dwarf2out_cfi_label ();
13450 cfa_offset = cfa_offset + 16;
13451 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13454 if (l_mask)
13456 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13457 offset = bit_count (l_mask);
13459 else
13460 offset = 0;
13462 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13463 offset + 16 + current_function_pretend_args_size);
13465 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13466 offset + 4);
13468 /* Make sure that the instruction fetching the PC is in the right place
13469 to calculate "start of backtrace creation code + 12". */
13470 if (l_mask)
13472 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13473 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13474 offset + 12);
13475 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13476 ARM_HARD_FRAME_POINTER_REGNUM);
13477 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13478 offset);
13480 else
13482 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13483 ARM_HARD_FRAME_POINTER_REGNUM);
13484 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13485 offset);
13486 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13487 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13488 offset + 12);
13491 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13492 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13493 offset + 8);
13494 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13495 offset + 12);
13496 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13497 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13499 else if (l_mask)
13500 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13502 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13504 if (high_regs_pushed)
13506 int pushable_regs = 0;
13507 int next_hi_reg;
13509 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13510 if (live_regs_mask & (1 << next_hi_reg))
13511 break;
13513 pushable_regs = l_mask & 0xff;
13515 if (pushable_regs == 0)
13516 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13518 while (high_regs_pushed > 0)
13520 int real_regs_mask = 0;
13522 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13524 if (pushable_regs & (1 << regno))
13526 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13528 high_regs_pushed--;
13529 real_regs_mask |= (1 << next_hi_reg);
13531 if (high_regs_pushed)
13533 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13534 next_hi_reg--)
13535 if (live_regs_mask & (1 << next_hi_reg))
13536 break;
13538 else
13540 pushable_regs &= ~((1 << regno) - 1);
13541 break;
13546 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13551 /* Handle the case of a double word load into a low register from
13552 a computed memory address. The computed address may involve a
13553 register which is overwritten by the load. */
13554 const char *
13555 thumb_load_double_from_address (rtx *operands)
13557 rtx addr;
13558 rtx base;
13559 rtx offset;
13560 rtx arg1;
13561 rtx arg2;
13563 if (GET_CODE (operands[0]) != REG)
13564 abort ();
13566 if (GET_CODE (operands[1]) != MEM)
13567 abort ();
13569 /* Get the memory address. */
13570 addr = XEXP (operands[1], 0);
13572 /* Work out how the memory address is computed. */
13573 switch (GET_CODE (addr))
13575 case REG:
13576 operands[2] = gen_rtx_MEM (SImode,
13577 plus_constant (XEXP (operands[1], 0), 4));
13579 if (REGNO (operands[0]) == REGNO (addr))
13581 output_asm_insn ("ldr\t%H0, %2", operands);
13582 output_asm_insn ("ldr\t%0, %1", operands);
13584 else
13586 output_asm_insn ("ldr\t%0, %1", operands);
13587 output_asm_insn ("ldr\t%H0, %2", operands);
13589 break;
13591 case CONST:
13592 /* Compute <address> + 4 for the high order load. */
13593 operands[2] = gen_rtx_MEM (SImode,
13594 plus_constant (XEXP (operands[1], 0), 4));
13596 output_asm_insn ("ldr\t%0, %1", operands);
13597 output_asm_insn ("ldr\t%H0, %2", operands);
13598 break;
13600 case PLUS:
13601 arg1 = XEXP (addr, 0);
13602 arg2 = XEXP (addr, 1);
13604 if (CONSTANT_P (arg1))
13605 base = arg2, offset = arg1;
13606 else
13607 base = arg1, offset = arg2;
13609 if (GET_CODE (base) != REG)
13610 abort ();
13612 /* Catch the case of <address> = <reg> + <reg> */
13613 if (GET_CODE (offset) == REG)
13615 int reg_offset = REGNO (offset);
13616 int reg_base = REGNO (base);
13617 int reg_dest = REGNO (operands[0]);
13619 /* Add the base and offset registers together into the
13620 higher destination register. */
13621 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13622 reg_dest + 1, reg_base, reg_offset);
13624 /* Load the lower destination register from the address in
13625 the higher destination register. */
13626 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13627 reg_dest, reg_dest + 1);
13629 /* Load the higher destination register from its own address
13630 plus 4. */
13631 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13632 reg_dest + 1, reg_dest + 1);
13634 else
13636 /* Compute <address> + 4 for the high order load. */
13637 operands[2] = gen_rtx_MEM (SImode,
13638 plus_constant (XEXP (operands[1], 0), 4));
13640 /* If the computed address is held in the low order register
13641 then load the high order register first, otherwise always
13642 load the low order register first. */
13643 if (REGNO (operands[0]) == REGNO (base))
13645 output_asm_insn ("ldr\t%H0, %2", operands);
13646 output_asm_insn ("ldr\t%0, %1", operands);
13648 else
13650 output_asm_insn ("ldr\t%0, %1", operands);
13651 output_asm_insn ("ldr\t%H0, %2", operands);
13654 break;
13656 case LABEL_REF:
13657 /* With no registers to worry about we can just load the value
13658 directly. */
13659 operands[2] = gen_rtx_MEM (SImode,
13660 plus_constant (XEXP (operands[1], 0), 4));
13662 output_asm_insn ("ldr\t%H0, %2", operands);
13663 output_asm_insn ("ldr\t%0, %1", operands);
13664 break;
13666 default:
13667 abort ();
13668 break;
13671 return "";
13674 const char *
13675 thumb_output_move_mem_multiple (int n, rtx *operands)
13677 rtx tmp;
13679 switch (n)
13681 case 2:
13682 if (REGNO (operands[4]) > REGNO (operands[5]))
13684 tmp = operands[4];
13685 operands[4] = operands[5];
13686 operands[5] = tmp;
13688 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13689 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13690 break;
13692 case 3:
13693 if (REGNO (operands[4]) > REGNO (operands[5]))
13695 tmp = operands[4];
13696 operands[4] = operands[5];
13697 operands[5] = tmp;
13699 if (REGNO (operands[5]) > REGNO (operands[6]))
13701 tmp = operands[5];
13702 operands[5] = operands[6];
13703 operands[6] = tmp;
13705 if (REGNO (operands[4]) > REGNO (operands[5]))
13707 tmp = operands[4];
13708 operands[4] = operands[5];
13709 operands[5] = tmp;
13712 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13713 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13714 break;
13716 default:
13717 abort ();
13720 return "";
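/* Illustrative note (not from the original source): the swaps above are a
   small bubble sort so that the register list handed to ldmia/stmia is in
   ascending order, as the Thumb multiple load/store encodings require.
   For example, with n == 3 and operands[4..6] = r5, r3, r4 the three
   compare/swap steps reorder them to r3, r4, r5 before emitting

       ldmia  r1!, {r3, r4, r5}
       stmia  r0!, {r3, r4, r5}

   (the register numbers, including the address registers, are
   hypothetical).  */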
13723 /* Output a call-via instruction for thumb state. */
13724 const char *
13725 thumb_call_via_reg (rtx reg)
13727 int regno = REGNO (reg);
13728 rtx *labelp;
13730 gcc_assert (regno < SP_REGNUM);
13732 /* If we are in the normal text section we can use a single instance
13733 per compilation unit. If we are doing function sections, then we need
13734 an entry per section, since we can't rely on reachability. */
13735 if (in_text_section ())
13737 thumb_call_reg_needed = 1;
13739 if (thumb_call_via_label[regno] == NULL)
13740 thumb_call_via_label[regno] = gen_label_rtx ();
13741 labelp = thumb_call_via_label + regno;
13743 else
13745 if (cfun->machine->call_via[regno] == NULL)
13746 cfun->machine->call_via[regno] = gen_label_rtx ();
13747 labelp = cfun->machine->call_via + regno;
13750 output_asm_insn ("bl\t%a0", labelp);
13751 return "";
13754 /* Routines for generating rtl. */
13755 void
13756 thumb_expand_movmemqi (rtx *operands)
13758 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13759 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13760 HOST_WIDE_INT len = INTVAL (operands[2]);
13761 HOST_WIDE_INT offset = 0;
13763 while (len >= 12)
13765 emit_insn (gen_movmem12b (out, in, out, in));
13766 len -= 12;
13769 if (len >= 8)
13771 emit_insn (gen_movmem8b (out, in, out, in));
13772 len -= 8;
13775 if (len >= 4)
13777 rtx reg = gen_reg_rtx (SImode);
13778 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13779 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13780 len -= 4;
13781 offset += 4;
13784 if (len >= 2)
13786 rtx reg = gen_reg_rtx (HImode);
13787 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13788 plus_constant (in, offset))));
13789 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13790 reg));
13791 len -= 2;
13792 offset += 2;
13795 if (len)
13797 rtx reg = gen_reg_rtx (QImode);
13798 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13799 plus_constant (in, offset))));
13800 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13801 reg));
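/* Worked example (illustrative, not from the original source): a 23-byte
   copy is expanded as one 12-byte block move, one 8-byte block move, then
   a 2-byte and a 1-byte tail (12 + 8 + 2 + 1 = 23).  The block moves use
   the post-incrementing movmem12b/movmem8b patterns, which update IN and
   OUT as they go, so OFFSET only has to track the scalar tail copies.  */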
13805 void
13806 thumb_reload_out_hi (rtx *operands)
13808 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13811 /* Handle reading a half-word from memory during reload. */
13812 void
13813 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13815 abort ();
13818 /* Return the length of a function name prefix
13819 that starts with the character 'c'. */
13820 static int
13821 arm_get_strip_length (int c)
13823 switch (c)
13825 ARM_NAME_ENCODING_LENGTHS
13826 default: return 0;
13830 /* Return a pointer to a function's name with any
13831 and all prefix encodings stripped from it. */
13832 const char *
13833 arm_strip_name_encoding (const char *name)
13835 int skip;
13837 while ((skip = arm_get_strip_length (* name)))
13838 name += skip;
13840 return name;
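/* Illustrative example (not from the original source): the prefix
   characters and their lengths come from ARM_NAME_ENCODING_LENGTHS, so a
   name such as "*foo" (the '*' meaning "emit verbatim", as tested in
   arm_asm_output_labelref below) would be returned as "foo".  Any other
   target-specific flag characters are removed the same way, one
   arm_get_strip_length step at a time.  */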
13843 /* If there is a '*' anywhere in the name's prefix, then
13844 emit the stripped name verbatim, otherwise prepend an
13845 underscore if leading underscores are being used. */
13846 void
13847 arm_asm_output_labelref (FILE *stream, const char *name)
13849 int skip;
13850 int verbatim = 0;
13852 while ((skip = arm_get_strip_length (* name)))
13854 verbatim |= (*name == '*');
13855 name += skip;
13858 if (verbatim)
13859 fputs (name, stream);
13860 else
13861 asm_fprintf (stream, "%U%s", name);
13864 static void
13865 arm_file_end (void)
13867 int regno;
13869 if (! thumb_call_reg_needed)
13870 return;
13872 text_section ();
13873 asm_fprintf (asm_out_file, "\t.code 16\n");
13874 ASM_OUTPUT_ALIGN (asm_out_file, 1);
13876 for (regno = 0; regno < SP_REGNUM; regno++)
13878 rtx label = thumb_call_via_label[regno];
13880 if (label != 0)
13882 targetm.asm_out.internal_label (asm_out_file, "L",
13883 CODE_LABEL_NUMBER (label));
13884 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
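/* Illustrative sketch (not part of the original source): for each register
   that needed an indirect call, the code above emits a tiny Thumb veneer
   of roughly the form

       .code 16
       .align 1
   .LthunkN:
       bx   rN

   which thumb_call_via_reg reaches with "bl .LthunkN".  The label name
   shown is hypothetical; the real one comes from
   targetm.asm_out.internal_label with prefix "L".  */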
13889 rtx aof_pic_label;
13891 #ifdef AOF_ASSEMBLER
13892 /* Special functions only needed when producing AOF syntax assembler. */
13894 struct pic_chain
13896 struct pic_chain * next;
13897 const char * symname;
13900 static struct pic_chain * aof_pic_chain = NULL;
13903 aof_pic_entry (rtx x)
13905 struct pic_chain ** chainp;
13906 int offset;
13908 if (aof_pic_label == NULL_RTX)
13910 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13913 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13914 offset += 4, chainp = &(*chainp)->next)
13915 if ((*chainp)->symname == XSTR (x, 0))
13916 return plus_constant (aof_pic_label, offset);
13918 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13919 (*chainp)->next = NULL;
13920 (*chainp)->symname = XSTR (x, 0);
13921 return plus_constant (aof_pic_label, offset);
13924 void
13925 aof_dump_pic_table (FILE *f)
13927 struct pic_chain * chain;
13929 if (aof_pic_chain == NULL)
13930 return;
13932 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13933 PIC_OFFSET_TABLE_REGNUM,
13934 PIC_OFFSET_TABLE_REGNUM);
13935 fputs ("|x$adcons|\n", f);
13937 for (chain = aof_pic_chain; chain; chain = chain->next)
13939 fputs ("\tDCD\t", f);
13940 assemble_name (f, chain->symname);
13941 fputs ("\n", f);
13945 int arm_text_section_count = 1;
13947 char *
13948 aof_text_section (void)
13950 static char buf[100];
13951 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13952 arm_text_section_count++);
13953 if (flag_pic)
13954 strcat (buf, ", PIC, REENTRANT");
13955 return buf;
13958 static int arm_data_section_count = 1;
13960 char *
13961 aof_data_section (void)
13963 static char buf[100];
13964 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13965 return buf;
13968 /* The AOF assembler is religiously strict about declarations of
13969 imported and exported symbols, so that it is impossible to declare
13970 a function as imported near the beginning of the file, and then to
13971 export it later on. It is, however, possible to delay the decision
13972 until all the functions in the file have been compiled. To get
13973 around this, we maintain a list of the imports and exports, and
13974 delete from it any that are subsequently defined. At the end of
13975 compilation we spit the remainder of the list out before the END
13976 directive. */
13978 struct import
13980 struct import * next;
13981 const char * name;
13984 static struct import * imports_list = NULL;
13986 void
13987 aof_add_import (const char *name)
13989 struct import * new;
13991 for (new = imports_list; new; new = new->next)
13992 if (new->name == name)
13993 return;
13995 new = (struct import *) xmalloc (sizeof (struct import));
13996 new->next = imports_list;
13997 imports_list = new;
13998 new->name = name;
14001 void
14002 aof_delete_import (const char *name)
14004 struct import ** old;
14006 for (old = &imports_list; *old; old = & (*old)->next)
14008 if ((*old)->name == name)
14010 *old = (*old)->next;
14011 return;
14016 int arm_main_function = 0;
14018 static void
14019 aof_dump_imports (FILE *f)
14021 /* The AOF assembler needs this to cause the startup code to be extracted
14022 from the library. Bringing in __main causes the whole thing to work
14023 automagically. */
14024 if (arm_main_function)
14026 text_section ();
14027 fputs ("\tIMPORT __main\n", f);
14028 fputs ("\tDCD __main\n", f);
14031 /* Now dump the remaining imports. */
14032 while (imports_list)
14034 fprintf (f, "\tIMPORT\t");
14035 assemble_name (f, imports_list->name);
14036 fputc ('\n', f);
14037 imports_list = imports_list->next;
14041 static void
14042 aof_globalize_label (FILE *stream, const char *name)
14044 default_globalize_label (stream, name);
14045 if (! strcmp (name, "main"))
14046 arm_main_function = 1;
14049 static void
14050 aof_file_start (void)
14052 fputs ("__r0\tRN\t0\n", asm_out_file);
14053 fputs ("__a1\tRN\t0\n", asm_out_file);
14054 fputs ("__a2\tRN\t1\n", asm_out_file);
14055 fputs ("__a3\tRN\t2\n", asm_out_file);
14056 fputs ("__a4\tRN\t3\n", asm_out_file);
14057 fputs ("__v1\tRN\t4\n", asm_out_file);
14058 fputs ("__v2\tRN\t5\n", asm_out_file);
14059 fputs ("__v3\tRN\t6\n", asm_out_file);
14060 fputs ("__v4\tRN\t7\n", asm_out_file);
14061 fputs ("__v5\tRN\t8\n", asm_out_file);
14062 fputs ("__v6\tRN\t9\n", asm_out_file);
14063 fputs ("__sl\tRN\t10\n", asm_out_file);
14064 fputs ("__fp\tRN\t11\n", asm_out_file);
14065 fputs ("__ip\tRN\t12\n", asm_out_file);
14066 fputs ("__sp\tRN\t13\n", asm_out_file);
14067 fputs ("__lr\tRN\t14\n", asm_out_file);
14068 fputs ("__pc\tRN\t15\n", asm_out_file);
14069 fputs ("__f0\tFN\t0\n", asm_out_file);
14070 fputs ("__f1\tFN\t1\n", asm_out_file);
14071 fputs ("__f2\tFN\t2\n", asm_out_file);
14072 fputs ("__f3\tFN\t3\n", asm_out_file);
14073 fputs ("__f4\tFN\t4\n", asm_out_file);
14074 fputs ("__f5\tFN\t5\n", asm_out_file);
14075 fputs ("__f6\tFN\t6\n", asm_out_file);
14076 fputs ("__f7\tFN\t7\n", asm_out_file);
14077 text_section ();
14080 static void
14081 aof_file_end (void)
14083 if (flag_pic)
14084 aof_dump_pic_table (asm_out_file);
14085 arm_file_end ();
14086 aof_dump_imports (asm_out_file);
14087 fputs ("\tEND\n", asm_out_file);
14089 #endif /* AOF_ASSEMBLER */
14091 #ifndef ARM_PE
14092 /* Symbols in the text segment can be accessed without indirecting via the
14093 constant pool; it may take an extra binary operation, but this is still
14094 faster than indirecting via memory. Don't do this when not optimizing,
14095 since we won't be calculating all of the offsets necessary to do this
14096 simplification. */
14098 static void
14099 arm_encode_section_info (tree decl, rtx rtl, int first)
14101 /* This doesn't work with AOF syntax, since the string table may be in
14102 a different AREA. */
14103 #ifndef AOF_ASSEMBLER
14104 if (optimize > 0 && TREE_CONSTANT (decl))
14105 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14106 #endif
14108 /* If we are referencing a function that is weak then encode a long call
14109 flag in the function name, otherwise if the function is static or
14110 known to be defined in this file then encode a short call flag. */
14111 if (first && DECL_P (decl))
14113 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14114 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14115 else if (! TREE_PUBLIC (decl))
14116 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14119 #endif /* !ARM_PE */
14121 static void
14122 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14124 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14125 && !strcmp (prefix, "L"))
14127 arm_ccfsm_state = 0;
14128 arm_target_insn = NULL;
14130 default_internal_label (stream, prefix, labelno);
14133 /* Output code to add DELTA to the first argument, and then jump
14134 to FUNCTION. Used for C++ multiple inheritance. */
14135 static void
14136 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14137 HOST_WIDE_INT delta,
14138 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14139 tree function)
14141 static int thunk_label = 0;
14142 char label[256];
14143 int mi_delta = delta;
14144 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14145 int shift = 0;
14146 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14147 ? 1 : 0);
14148 if (mi_delta < 0)
14149 mi_delta = - mi_delta;
14150 if (TARGET_THUMB)
14152 int labelno = thunk_label++;
14153 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14154 fputs ("\tldr\tr12, ", file);
14155 assemble_name (file, label);
14156 fputc ('\n', file);
14158 while (mi_delta != 0)
14160 if ((mi_delta & (3 << shift)) == 0)
14161 shift += 2;
14162 else
14164 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14165 mi_op, this_regno, this_regno,
14166 mi_delta & (0xff << shift));
14167 mi_delta &= ~(0xff << shift);
14168 shift += 8;
14171 if (TARGET_THUMB)
14173 fprintf (file, "\tbx\tr12\n");
14174 ASM_OUTPUT_ALIGN (file, 2);
14175 assemble_name (file, label);
14176 fputs (":\n", file);
14177 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14179 else
14181 fputs ("\tb\t", file);
14182 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14183 if (NEED_PLT_RELOC)
14184 fputs ("(PLT)", file);
14185 fputc ('\n', file);
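/* Illustrative sketch (not part of the original source): for an ARM
   (non-Thumb) thunk with a small positive DELTA, say 8, and a function
   whose value is not returned in memory, the code above produces roughly

       add  r0, r0, #8        @ adjust the this pointer
       b    function(PLT)     @ tail-call the real method

   Larger deltas are split into several add/sub instructions, eight bits
   (suitably shifted) at a time, by the mi_delta loop.  The symbol name
   and delta here are hypothetical.  */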
14190 arm_emit_vector_const (FILE *file, rtx x)
14192 int i;
14193 const char * pattern;
14195 if (GET_CODE (x) != CONST_VECTOR)
14196 abort ();
14198 switch (GET_MODE (x))
14200 case V2SImode: pattern = "%08x"; break;
14201 case V4HImode: pattern = "%04x"; break;
14202 case V8QImode: pattern = "%02x"; break;
14203 default: abort ();
14206 fprintf (file, "0x");
14207 for (i = CONST_VECTOR_NUNITS (x); i--;)
14209 rtx element;
14211 element = CONST_VECTOR_ELT (x, i);
14212 fprintf (file, pattern, INTVAL (element));
14215 return 1;
14218 const char *
14219 arm_output_load_gr (rtx *operands)
14221 rtx reg;
14222 rtx offset;
14223 rtx wcgr;
14224 rtx sum;
14226 if (GET_CODE (operands [1]) != MEM
14227 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14228 || GET_CODE (reg = XEXP (sum, 0)) != REG
14229 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14230 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14231 return "wldrw%?\t%0, %1";
14233 /* Fix up an out-of-range load of a GR register. */
14234 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14235 wcgr = operands[0];
14236 operands[0] = reg;
14237 output_asm_insn ("ldr%?\t%0, %1", operands);
14239 operands[0] = wcgr;
14240 operands[1] = reg;
14241 output_asm_insn ("tmcr%?\t%0, %1", operands);
14242 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14244 return "";
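/* Illustrative note (not from the original source): when the offset is out
   of range, the sequence above spills the base register, loads the word
   through it, transfers it to the GR destination with tmcr, and then
   restores the spilled register, i.e. roughly

       str  rB, [sp, #-4]!    @ Start of GR load expansion
       ldr  rB, <address>
       tmcr wcgrN, rB
       ldr  rB, [sp], #4      @ End of GR load expansion

   where rB and wcgrN stand for the base register and the GR destination
   taken from the operands.  */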
14247 static rtx
14248 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14249 int incoming ATTRIBUTE_UNUSED)
14251 #if 0
14252 /* FIXME: The ARM backend has special code to handle structure
14253 returns, and will reserve its own hidden first argument. So
14254 if this macro is enabled a *second* hidden argument will be
14255 reserved, which will break binary compatibility with old
14256 toolchains and also thunk handling. One day this should be
14257 fixed. */
14258 return 0;
14259 #else
14260 /* Register in which the address to store a structure value
14261 is passed to a function. */
14262 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14263 #endif
14266 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14268 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14269 named arg and all anonymous args onto the stack.
14270 XXX I know the prologue shouldn't be pushing registers, but it is faster
14271 that way. */
14273 static void
14274 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14275 enum machine_mode mode ATTRIBUTE_UNUSED,
14276 tree type ATTRIBUTE_UNUSED,
14277 int *pretend_size,
14278 int second_time ATTRIBUTE_UNUSED)
14280 cfun->machine->uses_anonymous_args = 1;
14281 if (cum->nregs < NUM_ARG_REGS)
14282 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
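/* Worked example (illustrative, not from the original source): assuming
   NUM_ARG_REGS is four (r0-r3) and UNITS_PER_WORD is four, a variadic
   function whose named arguments consume two registers (cum->nregs == 2)
   gets *pretend_size = (4 - 2) * 4 = 8 bytes, i.e. r2 and r3 are pushed
   by the prologue so the anonymous arguments form a contiguous block on
   the stack.  */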
14285 /* Return nonzero if the CONSUMER instruction (a store) does not need
14286 PRODUCER's value to calculate the address. */
14289 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14291 rtx value = PATTERN (producer);
14292 rtx addr = PATTERN (consumer);
14294 if (GET_CODE (value) == COND_EXEC)
14295 value = COND_EXEC_CODE (value);
14296 if (GET_CODE (value) == PARALLEL)
14297 value = XVECEXP (value, 0, 0);
14298 value = XEXP (value, 0);
14299 if (GET_CODE (addr) == COND_EXEC)
14300 addr = COND_EXEC_CODE (addr);
14301 if (GET_CODE (addr) == PARALLEL)
14302 addr = XVECEXP (addr, 0, 0);
14303 addr = XEXP (addr, 0);
14305 return !reg_overlap_mentioned_p (value, addr);
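/* Illustrative example (not from the original source): if PRODUCER is
   "r1 = r2 + r3" and CONSUMER is the store "str r4, [r1]", the store's
   address mentions r1, so the function returns 0 (there is an early
   address dependency).  If the consumer were "str r1, [r5]" instead, only
   the stored value depends on the producer and the function returns
   nonzero.  Register numbers are hypothetical.  */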
14308 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14309 have an early register shift value or amount dependency on the
14310 result of PRODUCER. */
14313 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14315 rtx value = PATTERN (producer);
14316 rtx op = PATTERN (consumer);
14317 rtx early_op;
14319 if (GET_CODE (value) == COND_EXEC)
14320 value = COND_EXEC_CODE (value);
14321 if (GET_CODE (value) == PARALLEL)
14322 value = XVECEXP (value, 0, 0);
14323 value = XEXP (value, 0);
14324 if (GET_CODE (op) == COND_EXEC)
14325 op = COND_EXEC_CODE (op);
14326 if (GET_CODE (op) == PARALLEL)
14327 op = XVECEXP (op, 0, 0);
14328 op = XEXP (op, 1);
14330 early_op = XEXP (op, 0);
14331 /* This is either an actual independent shift, or a shift applied to
14332 the first operand of another operation. We want the whole shift
14333 operation. */
14334 if (GET_CODE (early_op) == REG)
14335 early_op = op;
14337 return !reg_overlap_mentioned_p (value, early_op);
14340 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14341 have an early register shift value dependency on the result of
14342 PRODUCER. */
14345 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14347 rtx value = PATTERN (producer);
14348 rtx op = PATTERN (consumer);
14349 rtx early_op;
14351 if (GET_CODE (value) == COND_EXEC)
14352 value = COND_EXEC_CODE (value);
14353 if (GET_CODE (value) == PARALLEL)
14354 value = XVECEXP (value, 0, 0);
14355 value = XEXP (value, 0);
14356 if (GET_CODE (op) == COND_EXEC)
14357 op = COND_EXEC_CODE (op);
14358 if (GET_CODE (op) == PARALLEL)
14359 op = XVECEXP (op, 0, 0);
14360 op = XEXP (op, 1);
14362 early_op = XEXP (op, 0);
14364 /* This is either an actual independent shift, or a shift applied to
14365 the first operand of another operation. We want the value being
14366 shifted, in either case. */
14367 if (GET_CODE (early_op) != REG)
14368 early_op = XEXP (early_op, 0);
14370 return !reg_overlap_mentioned_p (value, early_op);
14373 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14374 have an early register mult dependency on the result of
14375 PRODUCER. */
14378 arm_no_early_mul_dep (rtx producer, rtx consumer)
14380 rtx value = PATTERN (producer);
14381 rtx op = PATTERN (consumer);
14383 if (GET_CODE (value) == COND_EXEC)
14384 value = COND_EXEC_CODE (value);
14385 if (GET_CODE (value) == PARALLEL)
14386 value = XVECEXP (value, 0, 0);
14387 value = XEXP (value, 0);
14388 if (GET_CODE (op) == COND_EXEC)
14389 op = COND_EXEC_CODE (op);
14390 if (GET_CODE (op) == PARALLEL)
14391 op = XVECEXP (op, 0, 0);
14392 op = XEXP (op, 1);
14394 return (GET_CODE (op) == PLUS
14395 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14399 /* We can't rely on the caller doing the proper promotion when
14400 using APCS or ATPCS. */
14402 static bool
14403 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14405 return !TARGET_AAPCS_BASED;
14409 /* AAPCS based ABIs use short enums by default. */
14411 static bool
14412 arm_default_short_enums (void)
14414 return TARGET_AAPCS_BASED;
14418 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14420 static bool
14421 arm_align_anon_bitfield (void)
14423 return TARGET_AAPCS_BASED;
14427 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14429 static tree
14430 arm_cxx_guard_type (void)
14432 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14436 /* The EABI says test the least significant bit of a guard variable. */
14438 static bool
14439 arm_cxx_guard_mask_bit (void)
14441 return TARGET_AAPCS_BASED;
14445 /* The EABI specifies that all array cookies are 8 bytes long. */
14447 static tree
14448 arm_get_cookie_size (tree type)
14450 tree size;
14452 if (!TARGET_AAPCS_BASED)
14453 return default_cxx_get_cookie_size (type);
14455 size = build_int_cst (sizetype, 8);
14456 return size;
14460 /* The EABI says that array cookies should also contain the element size. */
14462 static bool
14463 arm_cookie_has_size (void)
14465 return TARGET_AAPCS_BASED;
14469 /* The EABI says constructors and destructors should return a pointer to
14470 the object constructed/destroyed. */
14472 static bool
14473 arm_cxx_cdtor_returns_this (void)
14475 return TARGET_AAPCS_BASED;
14478 /* The EABI says that an inline function may never be the key
14479 method. */
14481 static bool
14482 arm_cxx_key_method_may_be_inline (void)
14484 return !TARGET_AAPCS_BASED;
14487 /* The EABI says that the virtual table, etc., for a class must be
14488 exported if it has a key method. The EABI does not specify the
14489 behavior if there is no key method, but there is no harm in
14490 exporting the class data in that case too. */
14492 static bool
14493 arm_cxx_export_class_data (void)
14495 return TARGET_AAPCS_BASED;
14498 void
14499 arm_set_return_address (rtx source, rtx scratch)
14501 arm_stack_offsets *offsets;
14502 HOST_WIDE_INT delta;
14503 rtx addr;
14504 unsigned long saved_regs;
14506 saved_regs = arm_compute_save_reg_mask ();
14508 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14509 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14510 else
14512 if (frame_pointer_needed)
14513 addr = plus_constant(hard_frame_pointer_rtx, -4);
14514 else
14516 /* LR will be the first saved register. */
14517 offsets = arm_get_frame_offsets ();
14518 delta = offsets->outgoing_args - (offsets->frame + 4);
14521 if (delta >= 4096)
14523 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14524 GEN_INT (delta & ~4095)));
14525 addr = scratch;
14526 delta &= 4095;
14528 else
14529 addr = stack_pointer_rtx;
14531 addr = plus_constant (addr, delta);
14533 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14538 void
14539 thumb_set_return_address (rtx source, rtx scratch)
14541 arm_stack_offsets *offsets;
14542 HOST_WIDE_INT delta;
14543 int reg;
14544 rtx addr;
14545 unsigned long mask;
14547 emit_insn (gen_rtx_USE (VOIDmode, source));
14549 mask = thumb_compute_save_reg_mask ();
14550 if (mask & (1 << LR_REGNUM))
14552 offsets = arm_get_frame_offsets ();
14554 /* Find the saved regs. */
14555 if (frame_pointer_needed)
14557 delta = offsets->soft_frame - offsets->saved_args;
14558 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14560 else
14562 delta = offsets->outgoing_args - offsets->saved_args;
14563 reg = SP_REGNUM;
14565 /* Allow for the stack frame. */
14566 if (TARGET_BACKTRACE)
14567 delta -= 16;
14568 /* The link register is always the first saved register. */
14569 delta -= 4;
14571 /* Construct the address. */
14572 addr = gen_rtx_REG (SImode, reg);
14573 if ((reg != SP_REGNUM && delta >= 128)
14574 || delta >= 1024)
14576 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14577 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14578 addr = scratch;
14580 else
14581 addr = plus_constant (addr, delta);
14583 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14585 else
14586 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14589 /* Implements target hook vector_mode_supported_p. */
14590 bool
14591 arm_vector_mode_supported_p (enum machine_mode mode)
14593 if ((mode == V2SImode)
14594 || (mode == V4HImode)
14595 || (mode == V8QImode))
14596 return true;
14598 return false;
14601 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
14602 ARM insns and therefore guarantee that the shift count is modulo 256.
14603 DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
14604 guarantee no particular behavior for out-of-range counts. */
14606 static unsigned HOST_WIDE_INT
14607 arm_shift_truncation_mask (enum machine_mode mode)
14609 return mode == SImode ? 255 : 0;
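/* Illustrative note (not from the original source): a mask of 255 tells
   the middle end that, for example, an SImode shift by 257 behaves exactly
   like a shift by 257 & 255 == 1, because the hardware only examines the
   bottom byte of the count.  Returning 0 for DImode makes no such promise,
   since the lib1funcs.asm/optabs expansions used there do not define
   out-of-range counts.  */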