/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned long thumb_compute_save_reg_mask (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int);
static void thumb_pushpop (FILE *, int, int, int *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);

#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static bool arm_cxx_export_class_data (void);
static void arm_init_libfuncs (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_EXPORT_CLASS_DATA
#define TARGET_CXX_EXPORT_CLASS_DATA arm_cxx_export_class_data

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int    arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
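
/* For illustration (this note is not in the original sources): expanding
   the chain above, FL_FOR_ARCH5TE works out to
     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB
   i.e. each architecture level accumulates the capabilities of its
   predecessors plus its own additions.  */
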
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
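
/* Worked example (not in the original sources): given
   "-march=armv5te -mtune=xscale" on the command line, the option
   machinery fills in arm_select[1].string = "armv5te" and
   arm_select[2].string = "xscale"; arm_override_options below then
   takes insn_flags from the armv5te entry and schedules code for the
   xscale core.  */
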
/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",   ARM_FLOAT_ABI_SOFT},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"hard",   ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
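
/* For illustration (not in the original sources): for value = 0x29
   (binary 101001) the loop runs three times, clearing one set bit per
   iteration: 101001 -> 101000 -> 100000 -> 0, so bit_count returns 3.  */
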
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}
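
/* Example of the effect (not in the original sources): with the BPABI
   mappings above, a 64-bit signed division "a / b" on DImode values is
   expanded as a call to __aeabi_ldivmod; the quotient comes back in
   {r0, r1} and the remainder (ignored for plain division) in
   {r2, r3}.  */
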
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init(&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
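
/* Usage example (not in the original sources): the strings above match
   the argument of the GCC function attribute, e.g.

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   for which arm_isr_value below returns ARM_FT_ISR.  */
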
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
          (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                         >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
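
/* Background (not in the original sources): an ARM data-processing
   immediate is an 8-bit value rotated right by an even amount, which
   is exactly what the rotating MASK above tests.  For example,
   0x000000FF, 0x0000FF00 and 0xFF000000 are all valid, while
   0x00000101 is not, since its set bits cannot be covered by any
   8 contiguous (evenly rotated) bit positions.  */
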
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
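
/* Illustration (not in the original sources): for AND, accepting ~I
   corresponds to the BIC instruction (x & 0xFFFFFF00 can be done as
   BIC with #0xFF), and for PLUS, accepting -I corresponds to SUB
   (x + (-4) becomes SUB #4); IOR and XOR have no such complementary
   form, hence the 0 above.  */
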
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
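
/* Worked example (not in the original sources): 0xF0F is not a valid
   ARM immediate, since its set bits span more than 8 (rotated) bits,
   so a SET of that value is synthesized from valid pieces, e.g.

     mov rD, #0xF00    @ 0xF0 rotated into place
     orr rD, rD, #0x0F

   two instructions; arm_gen_constant is consulted for this cost before
   deciding whether to push the constant into the literal pool
   instead.  */
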
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
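
/* Note (not in the original sources): this mirrors the splitting
   strategy used by arm_gen_constant: scanning two bits at a time from
   bit I, each peeled 8-bit window (a valid immediate at an even
   rotation) costs one data-processing insn, so the return value is
   the number of insns that synthesis of REMAINDER would take.  */
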
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }

1756 switch (code)
1758 case SET:
1759 /* See if we can do this by sign_extending a constant that is known
1760 to be negative. This is a good, way of doing it, since the shift
1761 may well merge into a subsequent insn. */
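/* For instance (an illustrative value): 0xff800000 is not a valid
   immediate, and neither is its complement, but 0x80000000 is, so
   the constant can be built as
       mov  rT, #0x80000000
       mov  rD, rT, asr #8
   with the arithmetic shift replicating the sign bit downwards.  */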
1762 if (set_sign_bit_copies > 1)
1764 if (const_ok_for_arm
1765 (temp1 = ARM_SIGN_EXTEND (remainder
1766 << (set_sign_bit_copies - 1))))
1768 if (generate)
1770 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1771 emit_constant_insn (cond,
1772 gen_rtx_SET (VOIDmode, new_src,
1773 GEN_INT (temp1)));
1774 emit_constant_insn (cond,
1775 gen_ashrsi3 (target, new_src,
1776 GEN_INT (set_sign_bit_copies - 1)));
1778 return 2;
1780 /* For an inverted constant, we will need to set the low bits;
1781 these will be shifted out of harm's way. */
1782 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1783 if (const_ok_for_arm (~temp1))
1785 if (generate)
1787 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1788 emit_constant_insn (cond,
1789 gen_rtx_SET (VOIDmode, new_src,
1790 GEN_INT (temp1)));
1791 emit_constant_insn (cond,
1792 gen_ashrsi3 (target, new_src,
1793 GEN_INT (set_sign_bit_copies - 1)));
1795 return 2;
1799 /* See if we can generate this by setting the bottom (or the top)
1800 16 bits, and then shifting these into the other half of the
1801 word. We only look for the simplest cases; to do more would cost
1802 too much. Be careful, however, not to generate this when the
1803 alternative would take fewer insns. */
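/* E.g. (illustrative) 0x01230123: synthesize 0x0123 -- itself not a
   valid immediate -- in a temporary, then ORR it with itself shifted
   left by 16.  */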
1804 if (val & 0xffff0000)
1806 temp1 = remainder & 0xffff0000;
1807 temp2 = remainder & 0x0000ffff;
1809 /* Overlaps outside this range are best done using other methods. */
1810 for (i = 9; i < 24; i++)
1812 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1813 && !const_ok_for_arm (temp2))
1815 rtx new_src = (subtargets
1816 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1817 : target);
1818 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1819 source, subtargets, generate);
1820 source = new_src;
1821 if (generate)
1822 emit_constant_insn
1823 (cond,
1824 gen_rtx_SET
1825 (VOIDmode, target,
1826 gen_rtx_IOR (mode,
1827 gen_rtx_ASHIFT (mode, source,
1828 GEN_INT (i)),
1829 source)));
1830 return insns + 1;
1834 /* Don't duplicate cases already considered. */
1835 for (i = 17; i < 24; i++)
1837 if (((temp1 | (temp1 >> i)) == remainder)
1838 && !const_ok_for_arm (temp1))
1840 rtx new_src = (subtargets
1841 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1842 : target);
1843 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1844 source, subtargets, generate);
1845 source = new_src;
1846 if (generate)
1847 emit_constant_insn
1848 (cond,
1849 gen_rtx_SET (VOIDmode, target,
1850 gen_rtx_IOR
1851 (mode,
1852 gen_rtx_LSHIFTRT (mode, source,
1853 GEN_INT (i)),
1854 source)));
1855 return insns + 1;
1859 break;
1861 case IOR:
1862 case XOR:
1863 /* If we have IOR or XOR, and the constant can be loaded in a
1864 single instruction, and we can find a temporary to put it in,
1865 then this can be done in two instructions instead of 3-4. */
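/* E.g. for (x | 0xffffff0f) -- an illustrative value whose
   complement 0xf0 is a valid immediate:
       mvn  rT, #0xf0          @ rT = 0xffffff0f
       orr  rD, rS, rT
   two insns, instead of synthesizing the constant piecemeal.  */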
1866 if (subtargets
1867 /* TARGET can't be NULL if SUBTARGETS is 0. */
1868 || (reload_completed && !reg_mentioned_p (target, source)))
1870 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1872 if (generate)
1874 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1876 emit_constant_insn (cond,
1877 gen_rtx_SET (VOIDmode, sub,
1878 GEN_INT (val)));
1879 emit_constant_insn (cond,
1880 gen_rtx_SET (VOIDmode, target,
1881 gen_rtx_fmt_ee (code, mode,
1882 source, sub)));
1884 return 2;
1888 if (code == XOR)
1889 break;
1891 if (set_sign_bit_copies > 8
1892 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1894 if (generate)
1896 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1897 rtx shift = GEN_INT (set_sign_bit_copies);
1899 emit_constant_insn
1900 (cond,
1901 gen_rtx_SET (VOIDmode, sub,
1902 gen_rtx_NOT (mode,
1903 gen_rtx_ASHIFT (mode,
1904 source,
1905 shift))));
1906 emit_constant_insn
1907 (cond,
1908 gen_rtx_SET (VOIDmode, target,
1909 gen_rtx_NOT (mode,
1910 gen_rtx_LSHIFTRT (mode, sub,
1911 shift))));
1913 return 2;
1916 if (set_zero_bit_copies > 8
1917 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1919 if (generate)
1921 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1922 rtx shift = GEN_INT (set_zero_bit_copies);
1924 emit_constant_insn
1925 (cond,
1926 gen_rtx_SET (VOIDmode, sub,
1927 gen_rtx_NOT (mode,
1928 gen_rtx_LSHIFTRT (mode,
1929 source,
1930 shift))));
1931 emit_constant_insn
1932 (cond,
1933 gen_rtx_SET (VOIDmode, target,
1934 gen_rtx_NOT (mode,
1935 gen_rtx_ASHIFT (mode, sub,
1936 shift))));
1938 return 2;
1941 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1943 if (generate)
1945 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1946 emit_constant_insn (cond,
1947 gen_rtx_SET (VOIDmode, sub,
1948 gen_rtx_NOT (mode, source)));
1949 source = sub;
1950 if (subtargets)
1951 sub = gen_reg_rtx (mode);
1952 emit_constant_insn (cond,
1953 gen_rtx_SET (VOIDmode, sub,
1954 gen_rtx_AND (mode, source,
1955 GEN_INT (temp1))));
1956 emit_constant_insn (cond,
1957 gen_rtx_SET (VOIDmode, target,
1958 gen_rtx_NOT (mode, sub)));
1960 return 3;
1962 break;
1964 case AND:
1965 /* See if two shifts will do 2 or more insns' worth of work. */
1966 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1968 HOST_WIDE_INT shift_mask = ((0xffffffff
1969 << (32 - clear_sign_bit_copies))
1970 & 0xffffffff);
1972 if ((remainder | shift_mask) != 0xffffffff)
1974 if (generate)
1976 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1977 insns = arm_gen_constant (AND, mode, cond,
1978 remainder | shift_mask,
1979 new_src, source, subtargets, 1);
1980 source = new_src;
1982 else
1984 rtx targ = subtargets ? NULL_RTX : target;
1985 insns = arm_gen_constant (AND, mode, cond,
1986 remainder | shift_mask,
1987 targ, source, subtargets, 0);
1991 if (generate)
1993 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1994 rtx shift = GEN_INT (clear_sign_bit_copies);
1996 emit_insn (gen_ashlsi3 (new_src, source, shift));
1997 emit_insn (gen_lshrsi3 (target, new_src, shift));
2000 return insns + 2;
2003 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2005 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2007 if ((remainder | shift_mask) != 0xffffffff)
2009 if (generate)
2011 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2013 insns = arm_gen_constant (AND, mode, cond,
2014 remainder | shift_mask,
2015 new_src, source, subtargets, 1);
2016 source = new_src;
2018 else
2020 rtx targ = subtargets ? NULL_RTX : target;
2022 insns = arm_gen_constant (AND, mode, cond,
2023 remainder | shift_mask,
2024 targ, source, subtargets, 0);
2028 if (generate)
2030 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2031 rtx shift = GEN_INT (clear_zero_bit_copies);
2033 emit_insn (gen_lshrsi3 (new_src, source, shift));
2034 emit_insn (gen_ashlsi3 (target, new_src, shift));
2037 return insns + 2;
2040 break;
2042 default:
2043 break;
2046 for (i = 0; i < 32; i++)
2047 if (remainder & (1 << i))
2048 num_bits_set++;
2050 if (code == AND || (can_invert && num_bits_set > 16))
2051 remainder = (~remainder) & 0xffffffff;
2052 else if (code == PLUS && num_bits_set > 16)
2053 remainder = (-remainder) & 0xffffffff;
2054 else
2056 can_invert = 0;
2057 can_negate = 0;
2060 /* Now try and find a way of doing the job in either two or three
2061 instructions.
2062 We start by looking for the largest block of zeros that is aligned on
2063 a 2-bit boundary; we then fill up the temps, wrapping around to the
2064 top of the word when we drop off the bottom.
2065 In the worst case this code should produce no more than four insns. */
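/* Illustrative example: 0x00ff00ff is emitted as two 8-bit chunks,
   MOV #0xff0000 followed by ADD #0xff (the chunks are disjoint, so
   ADD behaves like ORR), with the small chunk deliberately left
   until last -- see the comment below.  */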
2067 int best_start = 0;
2068 int best_consecutive_zeros = 0;
2070 for (i = 0; i < 32; i += 2)
2072 int consecutive_zeros = 0;
2074 if (!(remainder & (3 << i)))
2076 while ((i < 32) && !(remainder & (3 << i)))
2078 consecutive_zeros += 2;
2079 i += 2;
2081 if (consecutive_zeros > best_consecutive_zeros)
2083 best_consecutive_zeros = consecutive_zeros;
2084 best_start = i - consecutive_zeros;
2086 i -= 2;
2090 /* So long as it won't require any more insns to do so, it's
2091 desirable to emit a small constant (in bits 0...9) in the last
2092 insn. This way there is more chance that it can be combined with
2093 a later addressing insn to form a pre-indexed load or store
2094 operation. Consider:
2096 *((volatile int *)0xe0000100) = 1;
2097 *((volatile int *)0xe0000110) = 2;
2099 We want this to wind up as:
2101 mov rA, #0xe0000000
2102 mov rB, #1
2103 str rB, [rA, #0x100]
2104 mov rB, #2
2105 str rB, [rA, #0x110]
2107 rather than having to synthesize both large constants from scratch.
2109 Therefore, we calculate how many insns would be required to emit
2110 the constant starting from `best_start', and also starting from
2111 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2112 yield a shorter sequence, we may as well use zero. */
2113 if (best_start != 0
2114 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2115 && (count_insns_for_constant (remainder, 0) <=
2116 count_insns_for_constant (remainder, best_start)))
2117 best_start = 0;
2119 /* Now start emitting the insns. */
2120 i = best_start;
2123 int end;
2125 if (i <= 0)
2126 i += 32;
2127 if (remainder & (3 << (i - 2)))
2129 end = i - 8;
2130 if (end < 0)
2131 end += 32;
2132 temp1 = remainder & ((0x0ff << end)
2133 | ((i < end) ? (0xff >> (32 - end)) : 0));
2134 remainder &= ~temp1;
2136 if (generate)
2138 rtx new_src, temp1_rtx;
2140 if (code == SET || code == MINUS)
2142 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2143 if (can_invert && code != MINUS)
2144 temp1 = ~temp1;
2146 else
2148 if (remainder && subtargets)
2149 new_src = gen_reg_rtx (mode);
2150 else
2151 new_src = target;
2152 if (can_invert)
2153 temp1 = ~temp1;
2154 else if (can_negate)
2155 temp1 = -temp1;
2158 temp1 = trunc_int_for_mode (temp1, mode);
2159 temp1_rtx = GEN_INT (temp1);
2161 if (code == SET)
2163 else if (code == MINUS)
2164 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2165 else
2166 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2168 emit_constant_insn (cond,
2169 gen_rtx_SET (VOIDmode, new_src,
2170 temp1_rtx));
2171 source = new_src;
2174 if (code == SET)
2176 can_invert = 0;
2177 code = PLUS;
2179 else if (code == MINUS)
2180 code = PLUS;
2182 insns++;
2183 i -= 6;
2185 i -= 2;
2187 while (remainder);
2190 return insns;
2193 /* Canonicalize a comparison so that we are more likely to recognize it.
2194 This can be done for a few constant compares, where we can make the
2195 immediate value easier to load. */
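/* For instance (illustrative): (x > 0xfff) is rewritten as
   (x >= 0x1000), because 0x1000 is a valid ARM immediate while
   0xfff is not.  */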
2197 enum rtx_code
2198 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2200 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2202 switch (code)
2204 case EQ:
2205 case NE:
2206 return code;
2208 case GT:
2209 case LE:
2210 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2211 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2213 *op1 = GEN_INT (i + 1);
2214 return code == GT ? GE : LT;
2216 break;
2218 case GE:
2219 case LT:
2220 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2221 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2223 *op1 = GEN_INT (i - 1);
2224 return code == GE ? GT : LE;
2226 break;
2228 case GTU:
2229 case LEU:
2230 if (i != ~((unsigned HOST_WIDE_INT) 0)
2231 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2233 *op1 = GEN_INT (i + 1);
2234 return code == GTU ? GEU : LTU;
2236 break;
2238 case GEU:
2239 case LTU:
2240 if (i != 0
2241 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2243 *op1 = GEN_INT (i - 1);
2244 return code == GEU ? GTU : LEU;
2246 break;
2248 default:
2249 abort ();
2252 return code;
2256 /* Define how to find the value returned by a function. */
2258 rtx arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2260 enum machine_mode mode;
2261 int unsignedp ATTRIBUTE_UNUSED;
2262 rtx r ATTRIBUTE_UNUSED;
2265 mode = TYPE_MODE (type);
2266 /* Promote integer types. */
2267 if (INTEGRAL_TYPE_P (type))
2268 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2269 return LIBCALL_VALUE(mode);
2273 /* Decide whether a type should be returned in memory (true)
2274 or in a register (false). This is called by the macro
2275 RETURN_IN_MEMORY. */
2277 arm_return_in_memory (tree type)
2279 HOST_WIDE_INT size;
2281 if (!AGGREGATE_TYPE_P (type))
2282 /* All simple types are returned in registers. */
2283 return 0;
2285 size = int_size_in_bytes (type);
2287 if (arm_abi != ARM_ABI_APCS)
2289 /* ATPCS and later return aggregate types in memory only if they are
2290 larger than a word (or are variable size). */
2291 return (size < 0 || size > UNITS_PER_WORD);
2294 /* For the arm-wince targets we choose to be compatible with Microsoft's
2295 ARM and Thumb compilers, which always return aggregates in memory. */
2296 #ifndef ARM_WINCE
2297 /* All structures/unions bigger than one word are returned in memory.
2298 Also catch the case where int_size_in_bytes returns -1. In this case
2299 the aggregate is either huge or of variable size, and in either case
2300 we will want to return it via memory and not in a register. */
2301 if (size < 0 || size > UNITS_PER_WORD)
2302 return 1;
2304 if (TREE_CODE (type) == RECORD_TYPE)
2306 tree field;
2308 /* For a struct the APCS says that we only return in a register
2309 if the type is 'integer like' and every addressable element
2310 has an offset of zero. For practical purposes this means
2311 that the structure can have at most one non-bit-field element
2312 and that this element must be the first one in the structure. */
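/* For example (illustrative cases): under the APCS a
   struct { int i; } comes back in r0, while a struct { float f; }
   or any structure larger than one word is returned in memory.  */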
2314 /* Find the first field, ignoring non FIELD_DECL things which will
2315 have been created by C++. */
2316 for (field = TYPE_FIELDS (type);
2317 field && TREE_CODE (field) != FIELD_DECL;
2318 field = TREE_CHAIN (field))
2319 continue;
2321 if (field == NULL)
2322 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2324 /* Check that the first field is valid for returning in a register. */
2326 /* ... Floats are not allowed */
2327 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2328 return 1;
2330 /* ... Aggregates that are not themselves valid for returning in
2331 a register are not allowed. */
2332 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2333 return 1;
2335 /* Now check the remaining fields, if any. Only bitfields are allowed,
2336 since they are not addressable. */
2337 for (field = TREE_CHAIN (field);
2338 field;
2339 field = TREE_CHAIN (field))
2341 if (TREE_CODE (field) != FIELD_DECL)
2342 continue;
2344 if (!DECL_BIT_FIELD_TYPE (field))
2345 return 1;
2348 return 0;
2351 if (TREE_CODE (type) == UNION_TYPE)
2353 tree field;
2355 /* Unions can be returned in registers if every element is
2356 integral, or can be returned in an integer register. */
2357 for (field = TYPE_FIELDS (type);
2358 field;
2359 field = TREE_CHAIN (field))
2361 if (TREE_CODE (field) != FIELD_DECL)
2362 continue;
2364 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2365 return 1;
2367 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2368 return 1;
2371 return 0;
2373 #endif /* not ARM_WINCE */
2375 /* Return all other types in memory. */
2376 return 1;
2379 /* Indicate whether or not words of a double are in big-endian order. */
2382 arm_float_words_big_endian (void)
2384 if (TARGET_MAVERICK)
2385 return 0;
2387 /* For FPA, float words are always big-endian. For VFP, float words
2388 follow the memory system mode. */
2390 if (TARGET_FPA)
2392 return 1;
2395 if (TARGET_VFP)
2396 return (TARGET_BIG_END ? 1 : 0);
2398 return 1;
2401 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2402 for a call to a function whose data type is FNTYPE.
2403 For a library call, FNTYPE is NULL. */
2404 void
2405 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2406 rtx libname ATTRIBUTE_UNUSED,
2407 tree fndecl ATTRIBUTE_UNUSED)
2409 /* On the ARM, the offset starts at 0. */
2410 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2411 pcum->iwmmxt_nregs = 0;
2412 pcum->can_split = true;
2414 pcum->call_cookie = CALL_NORMAL;
2416 if (TARGET_LONG_CALLS)
2417 pcum->call_cookie = CALL_LONG;
2419 /* Check for long call/short call attributes. The attributes
2420 override any command line option. */
2421 if (fntype)
2423 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2424 pcum->call_cookie = CALL_SHORT;
2425 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2426 pcum->call_cookie = CALL_LONG;
2429 /* Varargs vectors are treated the same as long long.
2430 named_count avoids having to change the way arm handles 'named'. */
2431 pcum->named_count = 0;
2432 pcum->nargs = 0;
2434 if (TARGET_REALLY_IWMMXT && fntype)
2436 tree fn_arg;
2438 for (fn_arg = TYPE_ARG_TYPES (fntype);
2439 fn_arg;
2440 fn_arg = TREE_CHAIN (fn_arg))
2441 pcum->named_count += 1;
2443 if (! pcum->named_count)
2444 pcum->named_count = INT_MAX;
2449 /* Return true if mode/type need doubleword alignment. */
2450 bool
2451 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2453 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2454 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
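/* Illustrative example: where ARM_DOUBLEWORD_ALIGN holds, a call
   f (int a, long long b) passes 'a' in r0 and 'b' in r2/r3,
   leaving r1 unused so that 'b' starts in an even register.  */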
2458 /* Determine where to put an argument to a function.
2459 Value is zero to push the argument on the stack,
2460 or a hard register in which to store the argument.
2462 MODE is the argument's machine mode.
2463 TYPE is the data type of the argument (as a tree).
2464 This is null for libcalls where that information may
2465 not be available.
2466 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2467 the preceding args and about the function being called.
2468 NAMED is nonzero if this argument is a named parameter
2469 (otherwise it is an extra parameter matching an ellipsis). */
2472 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2473 tree type, int named)
2475 int nregs;
2477 /* Varargs vectors are treated the same as long long.
2478 named_count avoids having to change the way arm handles 'named'. */
2479 if (TARGET_IWMMXT_ABI
2480 && arm_vector_mode_supported_p (mode)
2481 && pcum->named_count > pcum->nargs + 1)
2483 if (pcum->iwmmxt_nregs <= 9)
2484 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2485 else
2487 pcum->can_split = false;
2488 return NULL_RTX;
2492 /* Put doubleword aligned quantities in even register pairs. */
2493 if (pcum->nregs & 1
2494 && ARM_DOUBLEWORD_ALIGN
2495 && arm_needs_doubleword_align (mode, type))
2496 pcum->nregs++;
2498 if (mode == VOIDmode)
2499 /* Compute operand 2 of the call insn. */
2500 return GEN_INT (pcum->call_cookie);
2502 /* Only allow splitting an arg between regs and memory if all preceding
2503 args were allocated to regs. For args passed by reference we only count
2504 the reference pointer. */
2505 if (pcum->can_split)
2506 nregs = 1;
2507 else
2508 nregs = ARM_NUM_REGS2 (mode, type);
2510 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2511 return NULL_RTX;
2513 return gen_rtx_REG (mode, pcum->nregs);
2516 /* Variable sized types are passed by reference. This is a GCC
2517 extension to the ARM ABI. */
2519 static bool
2520 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2521 enum machine_mode mode ATTRIBUTE_UNUSED,
2522 tree type, bool named ATTRIBUTE_UNUSED)
2524 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2527 /* Encode the current state of the #pragma [no_]long_calls. */
2528 typedef enum
2530 OFF, /* No #pragma [no_]long_calls is in effect. */
2531 LONG, /* #pragma long_calls is in effect. */
2532 SHORT /* #pragma no_long_calls is in effect. */
2533 } arm_pragma_enum;
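/* Example usage (illustrative):

       #pragma long_calls
       void far_away (void);    -- gets the long_call attribute
       #pragma no_long_calls
       void near_by (void);     -- gets the short_call attribute
       #pragma long_calls_off
       void whatever (void);    -- back to the command-line default
*/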
2535 static arm_pragma_enum arm_pragma_long_calls = OFF;
2537 void
2538 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2540 arm_pragma_long_calls = LONG;
2543 void
2544 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2546 arm_pragma_long_calls = SHORT;
2549 void
2550 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2552 arm_pragma_long_calls = OFF;
2555 /* Table of machine attributes. */
2556 const struct attribute_spec arm_attribute_table[] =
2558 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2559 /* Function calls made to this symbol must be done indirectly, because
2560 it may lie outside of the 26 bit addressing range of a normal function
2561 call. */
2562 { "long_call", 0, 0, false, true, true, NULL },
2563 /* Whereas these functions are always known to reside within the 26 bit
2564 addressing range. */
2565 { "short_call", 0, 0, false, true, true, NULL },
2566 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2567 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2568 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2569 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2570 #ifdef ARM_PE
2571 /* ARM/PE has three new attributes:
2572 interfacearm - ?
2573 dllexport - for exporting a function/variable that will live in a dll
2574 dllimport - for importing a function/variable from a dll
2576 Microsoft allows multiple declspecs in one __declspec, separating
2577 them with spaces. We do NOT support this. Instead, use __declspec
2578 multiple times.
2580 { "dllimport", 0, 0, true, false, false, NULL },
2581 { "dllexport", 0, 0, true, false, false, NULL },
2582 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2583 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2584 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2585 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2586 #endif
2587 { NULL, 0, 0, false, false, false, NULL }
2590 /* Handle an attribute requiring a FUNCTION_DECL;
2591 arguments as in struct attribute_spec.handler. */
2592 static tree
2593 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2594 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2596 if (TREE_CODE (*node) != FUNCTION_DECL)
2598 warning ("`%s' attribute only applies to functions",
2599 IDENTIFIER_POINTER (name));
2600 *no_add_attrs = true;
2603 return NULL_TREE;
2606 /* Handle an "interrupt" or "isr" attribute;
2607 arguments as in struct attribute_spec.handler. */
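/* A typical use (illustrative):
       void irq_handler (void) __attribute__ ((interrupt ("IRQ")));
   which gives the function the special prologue and epilogue an
   IRQ handler requires.  */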
2608 static tree
2609 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2610 bool *no_add_attrs)
2612 if (DECL_P (*node))
2614 if (TREE_CODE (*node) != FUNCTION_DECL)
2616 warning ("`%s' attribute only applies to functions",
2617 IDENTIFIER_POINTER (name));
2618 *no_add_attrs = true;
2620 /* FIXME: the argument, if any, is checked for type attributes;
2621 should it be checked for decl ones? */
2623 else
2625 if (TREE_CODE (*node) == FUNCTION_TYPE
2626 || TREE_CODE (*node) == METHOD_TYPE)
2628 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2630 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2631 *no_add_attrs = true;
2634 else if (TREE_CODE (*node) == POINTER_TYPE
2635 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2636 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2637 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2639 *node = build_variant_type_copy (*node);
2640 TREE_TYPE (*node) = build_type_attribute_variant
2641 (TREE_TYPE (*node),
2642 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2643 *no_add_attrs = true;
2645 else
2647 /* Possibly pass this attribute on from the type to a decl. */
2648 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2649 | (int) ATTR_FLAG_FUNCTION_NEXT
2650 | (int) ATTR_FLAG_ARRAY_NEXT))
2652 *no_add_attrs = true;
2653 return tree_cons (name, args, NULL_TREE);
2655 else
2657 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2662 return NULL_TREE;
2665 /* Return 0 if the attributes for two types are incompatible, 1 if they
2666 are compatible, and 2 if they are nearly compatible (which causes a
2667 warning to be generated). */
2668 static int
2669 arm_comp_type_attributes (tree type1, tree type2)
2671 int l1, l2, s1, s2;
2673 /* Check for mismatch of non-default calling convention. */
2674 if (TREE_CODE (type1) != FUNCTION_TYPE)
2675 return 1;
2677 /* Check for mismatched call attributes. */
2678 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2679 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2680 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2681 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2683 /* Only bother to check if an attribute is defined. */
2684 if (l1 | l2 | s1 | s2)
2686 /* If one type has an attribute, the other must have the same attribute. */
2687 if ((l1 != l2) || (s1 != s2))
2688 return 0;
2690 /* Disallow mixed attributes. */
2691 if ((l1 & s2) || (l2 & s1))
2692 return 0;
2695 /* Check for mismatched ISR attribute. */
2696 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2697 if (! l1)
2698 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2699 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2700 if (! l2)
2701 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2702 if (l1 != l2)
2703 return 0;
2705 return 1;
2708 /* Encode long_call or short_call attribute by prefixing
2709 symbol name in DECL with a special character FLAG. */
2710 void
2711 arm_encode_call_attribute (tree decl, int flag)
2713 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2714 int len = strlen (str);
2715 char * newstr;
2717 /* Do not allow weak functions to be treated as short call. */
2718 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2719 return;
2721 newstr = alloca (len + 2);
2722 newstr[0] = flag;
2723 strcpy (newstr + 1, str);
2725 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2726 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2729 /* Assigns default attributes to a newly defined type. This is used to
2730 set short_call/long_call attributes for function types of
2731 functions defined inside corresponding #pragma scopes. */
2732 static void
2733 arm_set_default_type_attributes (tree type)
2735 /* Add __attribute__ ((long_call)) to all functions when inside
2736 #pragma long_calls, or __attribute__ ((short_call)) when inside
2737 #pragma no_long_calls. */
2738 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2740 tree type_attr_list, attr_name;
2741 type_attr_list = TYPE_ATTRIBUTES (type);
2743 if (arm_pragma_long_calls == LONG)
2744 attr_name = get_identifier ("long_call");
2745 else if (arm_pragma_long_calls == SHORT)
2746 attr_name = get_identifier ("short_call");
2747 else
2748 return;
2750 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2751 TYPE_ATTRIBUTES (type) = type_attr_list;
2755 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2756 defined within the current compilation unit. If this cannot be
2757 determined, then 0 is returned. */
2758 static int
2759 current_file_function_operand (rtx sym_ref)
2761 /* This is a bit of a fib. A function will have a short call flag
2762 applied to its name if it has the short call attribute, or it has
2763 already been defined within the current compilation unit. */
2764 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2765 return 1;
2767 /* The current function is always defined within the current compilation
2768 unit. If it is a weak definition, however, then this may not be the real
2769 definition of the function, and so we have to say no. */
2770 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2771 && !DECL_WEAK (current_function_decl))
2772 return 1;
2774 /* We cannot make the determination - default to returning 0. */
2775 return 0;
2778 /* Return nonzero if a 32 bit "long_call" should be generated for
2779 this call. We generate a long_call if the function:
2781 a. has an __attribute__ ((long_call))
2782 or b. is within the scope of a #pragma long_calls
2783 or c. the -mlong-calls command line switch has been specified
2784 and, in the case of c., either:
2785 1. -ffunction-sections is in effect
2786 or 2. the current function has __attribute__ ((section))
2787 or 3. the target function has __attribute__ ((section))
2789 However we do not generate a long call if the function:
2791 d. has an __attribute__ ((short_call))
2792 or e. is inside the scope of a #pragma no_long_calls
2793 or f. is defined within the current compilation unit.
2795 This function will be called by C fragments contained in the machine
2796 description file. SYM_REF and CALL_COOKIE correspond to the matched
2797 rtl operands. CALL_SYMBOL is used to distinguish between
2798 two different callers of the function. It is set to 1 in the
2799 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2800 and "call_value" patterns. This is because of the difference in the
2801 SYM_REFs passed by these patterns. */
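/* For example (illustrative), declaring
       extern void foo (void) __attribute__ ((long_call));
   makes calls to foo load the full 32-bit address into a register
   rather than use a 26-bit-range branch instruction.  */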
2803 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2805 if (!call_symbol)
2807 if (GET_CODE (sym_ref) != MEM)
2808 return 0;
2810 sym_ref = XEXP (sym_ref, 0);
2813 if (GET_CODE (sym_ref) != SYMBOL_REF)
2814 return 0;
2816 if (call_cookie & CALL_SHORT)
2817 return 0;
2819 if (TARGET_LONG_CALLS)
2821 if (flag_function_sections
2822 || DECL_SECTION_NAME (current_function_decl))
2823 /* c.3 is handled by the definition of the
2824 ARM_DECLARE_FUNCTION_SIZE macro. */
2825 return 1;
2828 if (current_file_function_operand (sym_ref))
2829 return 0;
2831 return (call_cookie & CALL_LONG)
2832 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2833 || TARGET_LONG_CALLS;
2836 /* Return nonzero if it is ok to make a tail-call to DECL. */
2837 static bool
2838 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2840 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2842 if (cfun->machine->sibcall_blocked)
2843 return false;
2845 /* Never tailcall something for which we have no decl, or if we
2846 are in Thumb mode. */
2847 if (decl == NULL || TARGET_THUMB)
2848 return false;
2850 /* Get the calling method. */
2851 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2852 call_type = CALL_SHORT;
2853 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2854 call_type = CALL_LONG;
2856 /* Cannot tail-call to long calls, since these are out of range of
2857 a branch instruction. However, if not compiling PIC, we know
2858 we can reach the symbol if it is in this compilation unit. */
2859 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2860 return false;
2862 /* If we are interworking and the function is not declared static
2863 then we can't tail-call it unless we know that it exists in this
2864 compilation unit (since it might be a Thumb routine). */
2865 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2866 return false;
2868 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2869 if (IS_INTERRUPT (arm_current_func_type ()))
2870 return false;
2872 /* Everything else is ok. */
2873 return true;
2877 /* Addressing mode support functions. */
2879 /* Return nonzero if X is a legitimate immediate operand when compiling
2880 for PIC. */
2882 legitimate_pic_operand_p (rtx x)
2884 if (CONSTANT_P (x)
2885 && flag_pic
2886 && (GET_CODE (x) == SYMBOL_REF
2887 || (GET_CODE (x) == CONST
2888 && GET_CODE (XEXP (x, 0)) == PLUS
2889 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2890 return 0;
2892 return 1;
2896 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2898 if (GET_CODE (orig) == SYMBOL_REF
2899 || GET_CODE (orig) == LABEL_REF)
2901 #ifndef AOF_ASSEMBLER
2902 rtx pic_ref, address;
2903 #endif
2904 rtx insn;
2905 int subregs = 0;
2907 if (reg == 0)
2909 if (no_new_pseudos)
2910 abort ();
2911 else
2912 reg = gen_reg_rtx (Pmode);
2914 subregs = 1;
2917 #ifdef AOF_ASSEMBLER
2918 /* The AOF assembler can generate relocations for these directly, and
2919 understands that the PIC register has to be added into the offset. */
2920 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2921 #else
2922 if (subregs)
2923 address = gen_reg_rtx (Pmode);
2924 else
2925 address = reg;
2927 if (TARGET_ARM)
2928 emit_insn (gen_pic_load_addr_arm (address, orig));
2929 else
2930 emit_insn (gen_pic_load_addr_thumb (address, orig));
2932 if ((GET_CODE (orig) == LABEL_REF
2933 || (GET_CODE (orig) == SYMBOL_REF &&
2934 SYMBOL_REF_LOCAL_P (orig)))
2935 && NEED_GOT_RELOC)
2936 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2937 else
2939 pic_ref = gen_const_mem (Pmode,
2940 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2941 address));
2944 insn = emit_move_insn (reg, pic_ref);
2945 #endif
2946 current_function_uses_pic_offset_table = 1;
2947 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2948 by the loop pass. */
2949 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2950 REG_NOTES (insn));
2951 return reg;
2953 else if (GET_CODE (orig) == CONST)
2955 rtx base, offset;
2957 if (GET_CODE (XEXP (orig, 0)) == PLUS
2958 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2959 return orig;
2961 if (reg == 0)
2963 if (no_new_pseudos)
2964 abort ();
2965 else
2966 reg = gen_reg_rtx (Pmode);
2969 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2971 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2972 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2973 base == reg ? 0 : reg);
2975 else
2976 abort ();
2978 if (GET_CODE (offset) == CONST_INT)
2980 /* The base register doesn't really matter; we only want to
2981 test the index for the appropriate mode. */
2982 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2984 if (!no_new_pseudos)
2985 offset = force_reg (Pmode, offset);
2986 else
2987 abort ();
2990 if (GET_CODE (offset) == CONST_INT)
2991 return plus_constant (base, INTVAL (offset));
2994 if (GET_MODE_SIZE (mode) > 4
2995 && (GET_MODE_CLASS (mode) == MODE_INT
2996 || TARGET_SOFT_FLOAT))
2998 emit_insn (gen_addsi3 (reg, base, offset));
2999 return reg;
3002 return gen_rtx_PLUS (Pmode, base, offset);
3005 return orig;
3009 /* Find a spare low register. */
3011 static int
3012 thumb_find_work_register (int live_regs_mask)
3014 int reg;
3016 /* Use a spare arg register. */
3017 if (!regs_ever_live[LAST_ARG_REGNUM])
3018 return LAST_ARG_REGNUM;
3020 /* Look for a pushed register. */
3021 for (reg = LAST_LO_REGNUM; reg >= 0; reg--)
3022 if (live_regs_mask & (1 << reg))
3023 return reg;
3025 /* Something went wrong. */
3026 abort ();
3030 /* Generate code to load the PIC register. */
3032 void
3033 arm_load_pic_register (void)
3035 #ifndef AOF_ASSEMBLER
3036 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3037 rtx global_offset_table;
3039 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3040 return;
3042 if (!flag_pic)
3043 abort ();
3045 l1 = gen_label_rtx ();
3047 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3048 /* On the ARM the PC register contains 'dot + 8' at the time of the
3049 addition; on the Thumb it is 'dot + 4'. */
3050 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3051 if (GOT_PCREL)
3052 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3053 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3054 else
3055 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3057 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3059 if (TARGET_ARM)
3061 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3062 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3064 else
3066 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3068 int reg;
3070 /* We will have pushed the pic register, so we should always be
3071 able to find a work register. */
3072 reg = thumb_find_work_register (thumb_compute_save_reg_mask ());
3073 pic_tmp = gen_rtx_REG (SImode, reg);
3074 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3075 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3077 else
3078 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3079 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3082 /* Need to emit this whether or not we obey regdecls,
3083 since setjmp/longjmp can cause life info to screw up. */
3084 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3085 #endif /* AOF_ASSEMBLER */
3089 /* Return nonzero if X is valid as an ARM state addressing register. */
3090 static int
3091 arm_address_register_rtx_p (rtx x, int strict_p)
3093 int regno;
3095 if (GET_CODE (x) != REG)
3096 return 0;
3098 regno = REGNO (x);
3100 if (strict_p)
3101 return ARM_REGNO_OK_FOR_BASE_P (regno);
3103 return (regno <= LAST_ARM_REGNUM
3104 || regno >= FIRST_PSEUDO_REGISTER
3105 || regno == FRAME_POINTER_REGNUM
3106 || regno == ARG_POINTER_REGNUM);
3109 /* Return nonzero if X is a valid ARM state address operand. */
3111 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3112 int strict_p)
3114 bool use_ldrd;
3115 enum rtx_code code = GET_CODE (x);
3117 if (arm_address_register_rtx_p (x, strict_p))
3118 return 1;
3120 use_ldrd = (TARGET_LDRD
3121 && (mode == DImode
3122 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3124 if (code == POST_INC || code == PRE_DEC
3125 || ((code == PRE_INC || code == POST_DEC)
3126 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3127 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3129 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3130 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3131 && GET_CODE (XEXP (x, 1)) == PLUS
3132 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3134 rtx addend = XEXP (XEXP (x, 1), 1);
3136 /* Don't allow ldrd post-increment by register because it's hard
3137 to fix up invalid register choices. */
3138 if (use_ldrd
3139 && GET_CODE (x) == POST_MODIFY
3140 && GET_CODE (addend) == REG)
3141 return 0;
3143 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3144 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3147 /* After reload, constants split into minipools will have addresses
3148 from a LABEL_REF. */
3149 else if (reload_completed
3150 && (code == LABEL_REF
3151 || (code == CONST
3152 && GET_CODE (XEXP (x, 0)) == PLUS
3153 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3154 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3155 return 1;
3157 else if (mode == TImode)
3158 return 0;
3160 else if (code == PLUS)
3162 rtx xop0 = XEXP (x, 0);
3163 rtx xop1 = XEXP (x, 1);
3165 return ((arm_address_register_rtx_p (xop0, strict_p)
3166 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3167 || (arm_address_register_rtx_p (xop1, strict_p)
3168 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3171 #if 0
3172 /* Reload currently can't handle MINUS, so disable this for now */
3173 else if (GET_CODE (x) == MINUS)
3175 rtx xop0 = XEXP (x, 0);
3176 rtx xop1 = XEXP (x, 1);
3178 return (arm_address_register_rtx_p (xop0, strict_p)
3179 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3181 #endif
3183 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3184 && code == SYMBOL_REF
3185 && CONSTANT_POOL_ADDRESS_P (x)
3186 && ! (flag_pic
3187 && symbol_mentioned_p (get_pool_constant (x))))
3188 return 1;
3190 return 0;
3193 /* Return nonzero if INDEX is valid for an address index operand in
3194 ARM state. */
3195 static int
3196 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3197 int strict_p)
3199 HOST_WIDE_INT range;
3200 enum rtx_code code = GET_CODE (index);
3202 /* Standard coprocessor addressing modes. */
3203 if (TARGET_HARD_FLOAT
3204 && (TARGET_FPA || TARGET_MAVERICK)
3205 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3206 || (TARGET_MAVERICK && mode == DImode)))
3207 return (code == CONST_INT && INTVAL (index) < 1024
3208 && INTVAL (index) > -1024
3209 && (INTVAL (index) & 3) == 0);
3211 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3212 return (code == CONST_INT
3213 && INTVAL (index) < 1024
3214 && INTVAL (index) > -1024
3215 && (INTVAL (index) & 3) == 0);
3217 if (arm_address_register_rtx_p (index, strict_p)
3218 && (GET_MODE_SIZE (mode) <= 4))
3219 return 1;
3221 if (mode == DImode || mode == DFmode)
3223 if (code == CONST_INT)
3225 HOST_WIDE_INT val = INTVAL (index);
3227 if (TARGET_LDRD)
3228 return val > -256 && val < 256;
3229 else
3230 return val > -4096 && val < 4092;
3233 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3236 if (GET_MODE_SIZE (mode) <= 4
3237 && ! (arm_arch4
3238 && (mode == HImode
3239 || (mode == QImode && outer == SIGN_EXTEND))))
3241 if (code == MULT)
3243 rtx xiop0 = XEXP (index, 0);
3244 rtx xiop1 = XEXP (index, 1);
3246 return ((arm_address_register_rtx_p (xiop0, strict_p)
3247 && power_of_two_operand (xiop1, SImode))
3248 || (arm_address_register_rtx_p (xiop1, strict_p)
3249 && power_of_two_operand (xiop0, SImode)));
3251 else if (code == LSHIFTRT || code == ASHIFTRT
3252 || code == ASHIFT || code == ROTATERT)
3254 rtx op = XEXP (index, 1);
3256 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3257 && GET_CODE (op) == CONST_INT
3258 && INTVAL (op) > 0
3259 && INTVAL (op) <= 31);
3263 /* For ARM v4 we may be doing a sign-extend operation during the
3264 load. */
3265 if (arm_arch4)
3267 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3268 range = 256;
3269 else
3270 range = 4096;
3272 else
3273 range = (mode == HImode) ? 4095 : 4096;
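/* On ARMv4 the halfword and sign-extending loads (LDRH/LDRSH/LDRSB)
   use an addressing mode with only an 8-bit immediate offset, while
   LDR/LDRB take a 12-bit offset; hence the 256 vs 4096 ranges above.  */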
3275 return (code == CONST_INT
3276 && INTVAL (index) < range
3277 && INTVAL (index) > -range);
3280 /* Return nonzero if X is valid as a Thumb state base register. */
3281 static int
3282 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3284 int regno;
3286 if (GET_CODE (x) != REG)
3287 return 0;
3289 regno = REGNO (x);
3291 if (strict_p)
3292 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3294 return (regno <= LAST_LO_REGNUM
3295 || regno > LAST_VIRTUAL_REGISTER
3296 || regno == FRAME_POINTER_REGNUM
3297 || (GET_MODE_SIZE (mode) >= 4
3298 && (regno == STACK_POINTER_REGNUM
3299 || regno >= FIRST_PSEUDO_REGISTER
3300 || x == hard_frame_pointer_rtx
3301 || x == arg_pointer_rtx)));
3304 /* Return nonzero if x is a legitimate index register. This is the case
3305 for any base register that can access a QImode object. */
3306 inline static int
3307 thumb_index_register_rtx_p (rtx x, int strict_p)
3309 return thumb_base_register_rtx_p (x, QImode, strict_p);
3312 /* Return nonzero if x is a legitimate Thumb-state address.
3314 The AP may be eliminated to either the SP or the FP, so we use the
3315 least common denominator, e.g. SImode, and offsets from 0 to 64.
3317 ??? Verify whether the above is the right approach.
3319 ??? Also, the FP may be eliminated to the SP, so perhaps that
3320 needs special handling also.
3322 ??? Look at how the mips16 port solves this problem. It probably uses
3323 better ways to solve some of these problems.
3325 Although it is not incorrect, we don't accept QImode and HImode
3326 addresses based on the frame pointer or arg pointer until the
3327 reload pass starts. This is so that eliminating such addresses
3328 into stack based ones won't produce impossible code. */
3330 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3332 /* ??? Not clear if this is right. Experiment. */
3333 if (GET_MODE_SIZE (mode) < 4
3334 && !(reload_in_progress || reload_completed)
3335 && (reg_mentioned_p (frame_pointer_rtx, x)
3336 || reg_mentioned_p (arg_pointer_rtx, x)
3337 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3338 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3339 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3340 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3341 return 0;
3343 /* Accept any base register. SP only in SImode or larger. */
3344 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3345 return 1;
3347 /* This is PC relative data before arm_reorg runs. */
3348 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3349 && GET_CODE (x) == SYMBOL_REF
3350 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3351 return 1;
3353 /* This is PC relative data after arm_reorg runs. */
3354 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3355 && (GET_CODE (x) == LABEL_REF
3356 || (GET_CODE (x) == CONST
3357 && GET_CODE (XEXP (x, 0)) == PLUS
3358 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3359 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3360 return 1;
3362 /* Post-inc indexing only supported for SImode and larger. */
3363 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3364 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3365 return 1;
3367 else if (GET_CODE (x) == PLUS)
3369 /* REG+REG address can be any two index registers. */
3370 /* We disallow FRAME+REG addressing since we know that FRAME
3371 will be replaced with STACK, and SP relative addressing only
3372 permits SP+OFFSET. */
3373 if (GET_MODE_SIZE (mode) <= 4
3374 && XEXP (x, 0) != frame_pointer_rtx
3375 && XEXP (x, 1) != frame_pointer_rtx
3376 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3377 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3378 return 1;
3380 /* REG+const has a 5-7 bit offset for non-SP registers. */
3381 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3382 || XEXP (x, 0) == arg_pointer_rtx)
3383 && GET_CODE (XEXP (x, 1)) == CONST_INT
3384 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3385 return 1;
3387 /* REG+const has a 10-bit offset for SP, but only SImode and
3388 larger are supported. */
3389 /* ??? Should probably check for DI/DFmode overflow here
3390 just like GO_IF_LEGITIMATE_OFFSET does. */
3391 else if (GET_CODE (XEXP (x, 0)) == REG
3392 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3393 && GET_MODE_SIZE (mode) >= 4
3394 && GET_CODE (XEXP (x, 1)) == CONST_INT
3395 && INTVAL (XEXP (x, 1)) >= 0
3396 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3397 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3398 return 1;
3400 else if (GET_CODE (XEXP (x, 0)) == REG
3401 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3402 && GET_MODE_SIZE (mode) >= 4
3403 && GET_CODE (XEXP (x, 1)) == CONST_INT
3404 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3405 return 1;
3408 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3409 && GET_MODE_SIZE (mode) == 4
3410 && GET_CODE (x) == SYMBOL_REF
3411 && CONSTANT_POOL_ADDRESS_P (x)
3412 && !(flag_pic
3413 && symbol_mentioned_p (get_pool_constant (x))))
3414 return 1;
3416 return 0;
3419 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3420 instruction of mode MODE. */
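/* The Thumb 5-bit offset field is scaled by the access size, giving
   0..31 for byte, 0..62 (even) for halfword and 0..124 (a multiple
   of 4) for word accesses, as checked below.  */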
3422 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3424 switch (GET_MODE_SIZE (mode))
3426 case 1:
3427 return val >= 0 && val < 32;
3429 case 2:
3430 return val >= 0 && val < 64 && (val & 1) == 0;
3432 default:
3433 return (val >= 0
3434 && (val + GET_MODE_SIZE (mode)) <= 128
3435 && (val & 3) == 0);
3439 /* Try machine-dependent ways of modifying an illegitimate address
3440 to be legitimate. If we find one, return the new, valid address. */
3442 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3444 if (GET_CODE (x) == PLUS)
3446 rtx xop0 = XEXP (x, 0);
3447 rtx xop1 = XEXP (x, 1);
3449 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3450 xop0 = force_reg (SImode, xop0);
3452 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3453 xop1 = force_reg (SImode, xop1);
3455 if (ARM_BASE_REGISTER_RTX_P (xop0)
3456 && GET_CODE (xop1) == CONST_INT)
3458 HOST_WIDE_INT n, low_n;
3459 rtx base_reg, val;
3460 n = INTVAL (xop1);
3462 /* VFP addressing modes actually allow greater offsets, but for
3463 now we just stick with the lowest common denominator. */
3464 if (mode == DImode
3465 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3467 low_n = n & 0x0f;
3468 n &= ~0x0f;
3469 if (low_n > 4)
3471 n += 16;
3472 low_n -= 16;
3475 else
3477 low_n = ((mode) == TImode ? 0
3478 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3479 n -= low_n;
3482 base_reg = gen_reg_rtx (SImode);
3483 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3484 GEN_INT (n)), NULL_RTX);
3485 emit_move_insn (base_reg, val);
3486 x = (low_n == 0 ? base_reg
3487 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3489 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3490 x = gen_rtx_PLUS (SImode, xop0, xop1);
3493 /* XXX We don't allow MINUS any more -- see comment in
3494 arm_legitimate_address_p (). */
3495 else if (GET_CODE (x) == MINUS)
3497 rtx xop0 = XEXP (x, 0);
3498 rtx xop1 = XEXP (x, 1);
3500 if (CONSTANT_P (xop0))
3501 xop0 = force_reg (SImode, xop0);
3503 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3504 xop1 = force_reg (SImode, xop1);
3506 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3507 x = gen_rtx_MINUS (SImode, xop0, xop1);
3510 if (flag_pic)
3512 /* We need to find and carefully transform any SYMBOL and LABEL
3513 references, so go back to the original address expression. */
3514 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3516 if (new_x != orig_x)
3517 x = new_x;
3520 return x;
3524 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3525 to be legitimate. If we find one, return the new, valid address. */
3527 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3529 if (GET_CODE (x) == PLUS
3530 && GET_CODE (XEXP (x, 1)) == CONST_INT
3531 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3532 || INTVAL (XEXP (x, 1)) < 0))
3534 rtx xop0 = XEXP (x, 0);
3535 rtx xop1 = XEXP (x, 1);
3536 HOST_WIDE_INT offset = INTVAL (xop1);
3538 /* Try and fold the offset into a biasing of the base register and
3539 then offsetting that. Don't do this when optimizing for space
3540 since it can cause too many CSEs. */
3541 if (optimize_size && offset >= 0
3542 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3544 HOST_WIDE_INT delta;
3546 if (offset >= 256)
3547 delta = offset - (256 - GET_MODE_SIZE (mode));
3548 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3549 delta = 31 * GET_MODE_SIZE (mode);
3550 else
3551 delta = offset & (~31 * GET_MODE_SIZE (mode));
3553 xop0 = force_operand (plus_constant (xop0, offset - delta),
3554 NULL_RTX);
3555 x = plus_constant (xop0, delta);
3557 else if (offset < 0 && offset > -256)
3558 /* Small negative offsets are best done with a subtract before the
3559 dereference; forcing these into a register normally takes two
3560 instructions. */
3561 x = force_operand (x, NULL_RTX);
3562 else
3564 /* For the remaining cases, force the constant into a register. */
3565 xop1 = force_reg (SImode, xop1);
3566 x = gen_rtx_PLUS (SImode, xop0, xop1);
3569 else if (GET_CODE (x) == PLUS
3570 && s_register_operand (XEXP (x, 1), SImode)
3571 && !s_register_operand (XEXP (x, 0), SImode))
3573 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3575 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3578 if (flag_pic)
3580 /* We need to find and carefully transform any SYMBOL and LABEL
3582 references, so go back to the original address expression. */
3582 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3584 if (new_x != orig_x)
3585 x = new_x;
3588 return x;
3593 #define REG_OR_SUBREG_REG(X) \
3594 (GET_CODE (X) == REG \
3595 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3597 #define REG_OR_SUBREG_RTX(X) \
3598 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3600 #ifndef COSTS_N_INSNS
3601 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3602 #endif
3603 static inline int
3604 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3606 enum machine_mode mode = GET_MODE (x);
3608 switch (code)
3610 case ASHIFT:
3611 case ASHIFTRT:
3612 case LSHIFTRT:
3613 case ROTATERT:
3614 case PLUS:
3615 case MINUS:
3616 case COMPARE:
3617 case NEG:
3618 case NOT:
3619 return COSTS_N_INSNS (1);
3621 case MULT:
3622 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3624 int cycles = 0;
3625 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3627 while (i)
3629 i >>= 2;
3630 cycles++;
3632 return COSTS_N_INSNS (2) + cycles;
3634 return COSTS_N_INSNS (1) + 16;
3636 case SET:
3637 return (COSTS_N_INSNS (1)
3638 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3639 + (GET_CODE (SET_DEST (x)) == MEM)));
3641 case CONST_INT:
3642 if (outer == SET)
3644 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3645 return 0;
3646 if (thumb_shiftable_const (INTVAL (x)))
3647 return COSTS_N_INSNS (2);
3648 return COSTS_N_INSNS (3);
3650 else if ((outer == PLUS || outer == COMPARE)
3651 && INTVAL (x) < 256 && INTVAL (x) > -256)
3652 return 0;
3653 else if (outer == AND
3654 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3655 return COSTS_N_INSNS (1);
3656 else if (outer == ASHIFT || outer == ASHIFTRT
3657 || outer == LSHIFTRT)
3658 return 0;
3659 return COSTS_N_INSNS (2);
3661 case CONST:
3662 case CONST_DOUBLE:
3663 case LABEL_REF:
3664 case SYMBOL_REF:
3665 return COSTS_N_INSNS (3);
3667 case UDIV:
3668 case UMOD:
3669 case DIV:
3670 case MOD:
3671 return 100;
3673 case TRUNCATE:
3674 return 99;
3676 case AND:
3677 case XOR:
3678 case IOR:
3679 /* XXX guess. */
3680 return 8;
3682 case MEM:
3683 /* XXX another guess. */
3684 /* Memory costs quite a lot for the first word, but subsequent words
3685 load at the equivalent of a single insn each. */
3686 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3687 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3688 ? 4 : 0));
3690 case IF_THEN_ELSE:
3691 /* XXX a guess. */
3692 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3693 return 14;
3694 return 2;
3696 case ZERO_EXTEND:
3697 /* XXX still guessing. */
3698 switch (GET_MODE (XEXP (x, 0)))
3700 case QImode:
3701 return (1 + (mode == DImode ? 4 : 0)
3702 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3704 case HImode:
3705 return (4 + (mode == DImode ? 4 : 0)
3706 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3708 case SImode:
3709 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3711 default:
3712 return 99;
3715 default:
3716 return 99;
3721 /* Worker routine for arm_rtx_costs. */
3722 static inline int
3723 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3725 enum machine_mode mode = GET_MODE (x);
3726 enum rtx_code subcode;
3727 int extra_cost;
3729 switch (code)
3731 case MEM:
3732 /* Memory costs quite a lot for the first word, but subsequent words
3733 load at the equivalent of a single insn each. */
3734 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3735 + (GET_CODE (x) == SYMBOL_REF
3736 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3738 case DIV:
3739 case MOD:
3740 case UDIV:
3741 case UMOD:
3742 return optimize_size ? COSTS_N_INSNS (2) : 100;
3744 case ROTATE:
3745 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3746 return 4;
3747 /* Fall through */
3748 case ROTATERT:
3749 if (mode != SImode)
3750 return 8;
3751 /* Fall through */
3752 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3753 if (mode == DImode)
3754 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3755 + ((GET_CODE (XEXP (x, 0)) == REG
3756 || (GET_CODE (XEXP (x, 0)) == SUBREG
3757 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3758 ? 0 : 8));
3759 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3760 || (GET_CODE (XEXP (x, 0)) == SUBREG
3761 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3762 ? 0 : 4)
3763 + ((GET_CODE (XEXP (x, 1)) == REG
3764 || (GET_CODE (XEXP (x, 1)) == SUBREG
3765 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3766 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3767 ? 0 : 4));
3769 case MINUS:
3770 if (mode == DImode)
3771 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3772 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3773 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3774 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3775 ? 0 : 8));
3777 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3778 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3779 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3780 && arm_const_double_rtx (XEXP (x, 1))))
3781 ? 0 : 8)
3782 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3783 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3784 && arm_const_double_rtx (XEXP (x, 0))))
3785 ? 0 : 8));
3787 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3788 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3789 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3790 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3791 || subcode == ASHIFTRT || subcode == LSHIFTRT
3792 || subcode == ROTATE || subcode == ROTATERT
3793 || (subcode == MULT
3794 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3795 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3796 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3797 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3798 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3799 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3800 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3801 return 1;
3802 /* Fall through */
3804 case PLUS:
3805 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3806 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3807 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3808 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3809 && arm_const_double_rtx (XEXP (x, 1))))
3810 ? 0 : 8));
3812 /* Fall through */
3813 case AND: case XOR: case IOR:
3814 extra_cost = 0;
3816 /* Normally the frame registers will be split into reg+const during
3817 reload, so it is a bad idea to combine them with other instructions,
3818 since then they might not be moved outside of loops. As a compromise
3819 we allow integration with ops that have a constant as their second
3820 operand. */
3821 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3822 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3823 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3824 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3825 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3826 extra_cost = 4;
3828 if (mode == DImode)
3829 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3830 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3831 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3832 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3833 ? 0 : 8));
3835 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3836 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3837 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3838 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3839 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3840 ? 0 : 4));
3842 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3843 return (1 + extra_cost
3844 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3845 || subcode == LSHIFTRT || subcode == ASHIFTRT
3846 || subcode == ROTATE || subcode == ROTATERT
3847 || (subcode == MULT
3848 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3849 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3850 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3851 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3852 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3853 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3854 ? 0 : 4));
3856 return 8;
3858 case MULT:
3859 /* This should have been handled by the CPU specific routines. */
3860 abort ();
3862 case TRUNCATE:
3863 if (arm_arch3m && mode == SImode
3864 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3865 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3866 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3867 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3868 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3869 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3870 return 8;
3871 return 99;
3873 case NEG:
3874 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3875 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3876 /* Fall through */
3877 case NOT:
3878 if (mode == DImode)
3879 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3881 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3883 case IF_THEN_ELSE:
3884 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3885 return 14;
3886 return 2;
3888 case COMPARE:
3889 return 1;
3891 case ABS:
3892 return 4 + (mode == DImode ? 4 : 0);
3894 case SIGN_EXTEND:
3895 if (GET_MODE (XEXP (x, 0)) == QImode)
3896 return (4 + (mode == DImode ? 4 : 0)
3897 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3898 /* Fall through */
3899 case ZERO_EXTEND:
3900 switch (GET_MODE (XEXP (x, 0)))
3902 case QImode:
3903 return (1 + (mode == DImode ? 4 : 0)
3904 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3906 case HImode:
3907 return (4 + (mode == DImode ? 4 : 0)
3908 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3910 case SImode:
3911 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3913 case V8QImode:
3914 case V4HImode:
3915 case V2SImode:
3916 case V4QImode:
3917 case V2HImode:
3918 return 1;
3920 default:
3921 break;
3923 abort ();
3925 case CONST_INT:
3926 if (const_ok_for_arm (INTVAL (x)))
3927 return outer == SET ? 2 : -1;
3928 else if (outer == AND
3929 && const_ok_for_arm (~INTVAL (x)))
3930 return -1;
3931 else if ((outer == COMPARE
3932 || outer == PLUS || outer == MINUS)
3933 && const_ok_for_arm (-INTVAL (x)))
3934 return -1;
3935 else
3936 return 5;
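/* A negative return value tells our caller that the constant is
   effectively free, because it can be folded into the outer
   operation.  A hypothetical example: for (plus r0 (const_int -1)),
   -1 is not a valid ARM immediate but 1 is, so we return -1 and the
   expression can become "sub r0, r0, #1" at no extra cost.  */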
3938 case CONST:
3939 case LABEL_REF:
3940 case SYMBOL_REF:
3941 return 6;
3943 case CONST_DOUBLE:
3944 if (arm_const_double_rtx (x))
3945 return outer == SET ? 2 : -1;
3946 else if ((outer == COMPARE || outer == PLUS)
3947 && neg_const_double_rtx_ok_for_fpa (x))
3948 return -1;
3949 return 7;
3951 default:
3952 return 99;
3956 /* RTX costs when optimizing for size. */
3957 static bool
3958 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
3960 enum machine_mode mode = GET_MODE (x);
3962 if (TARGET_THUMB)
3964 /* XXX TBD. For now, use the standard costs. */
3965 *total = thumb_rtx_costs (x, code, outer_code);
3966 return true;
3969 switch (code)
3971 case MEM:
3972 /* A memory access costs one insn if the address is a single register;
3973 otherwise it costs one insn per word of the mode. */
3974 if (REG_P (XEXP (x, 0)))
3975 *total = COSTS_N_INSNS (1);
3976 else
3977 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
3978 return true;
3980 case DIV:
3981 case MOD:
3982 case UDIV:
3983 case UMOD:
3984 /* Needs a libcall, so it costs about this. */
3985 *total = COSTS_N_INSNS (2);
3986 return false;
3988 case ROTATE:
3989 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3991 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
3992 return true;
3994 /* Fall through */
3995 case ROTATERT:
3996 case ASHIFT:
3997 case LSHIFTRT:
3998 case ASHIFTRT:
3999 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
4001 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
4002 return true;
4004 else if (mode == SImode)
4006 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
4007 /* Slightly disparage register shifts, but not by much. */
4008 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4009 *total += 1 + rtx_cost (XEXP (x, 1), code);
4010 return true;
4013 /* Needs a libcall. */
4014 *total = COSTS_N_INSNS (2);
4015 return false;
4017 case MINUS:
4018 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4020 *total = COSTS_N_INSNS (1);
4021 return false;
4024 if (mode == SImode)
4026 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
4027 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
4029 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
4030 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
4031 || subcode1 == ROTATE || subcode1 == ROTATERT
4032 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
4033 || subcode1 == ASHIFTRT)
4035 /* It's just the cost of the two operands. */
4036 *total = 0;
4037 return false;
4040 *total = COSTS_N_INSNS (1);
4041 return false;
4044 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4045 return false;
4047 case PLUS:
4048 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4050 *total = COSTS_N_INSNS (1);
4051 return false;
4054 /* Fall through */
4055 case AND: case XOR: case IOR:
4056 if (mode == SImode)
4058 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
4060 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
4061 || subcode == LSHIFTRT || subcode == ASHIFTRT
4062 || (code == AND && subcode == NOT))
4064 /* It's just the cost of the two operands. */
4065 *total = 0;
4066 return false;
4070 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4071 return false;
4073 case MULT:
4074 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4075 return false;
4077 case NEG:
4078 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4079 *total = COSTS_N_INSNS (1);
4080 /* Fall through */
4081 case NOT:
4082 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4084 return false;
4086 case IF_THEN_ELSE:
4087 *total = 0;
4088 return false;
4090 case COMPARE:
4091 if (cc_register (XEXP (x, 0), VOIDmode))
4092 *total = 0;
4093 else
4094 *total = COSTS_N_INSNS (1);
4095 return false;
4097 case ABS:
4098 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
4099 *total = COSTS_N_INSNS (1);
4100 else
4101 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
4102 return false;
4104 case SIGN_EXTEND:
4105 *total = 0;
4106 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
4108 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4109 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4111 if (mode == DImode)
4112 *total += COSTS_N_INSNS (1);
4113 return false;
4115 case ZERO_EXTEND:
4116 *total = 0;
4117 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
4119 switch (GET_MODE (XEXP (x, 0)))
4121 case QImode:
4122 *total += COSTS_N_INSNS (1);
4123 break;
4125 case HImode:
4126 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
4128 case SImode:
4129 break;
4131 default:
4132 *total += COSTS_N_INSNS (2);
4136 if (mode == DImode)
4137 *total += COSTS_N_INSNS (1);
4139 return false;
4141 case CONST_INT:
4142 if (const_ok_for_arm (INTVAL (x)))
4143 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
4144 else if (const_ok_for_arm (~INTVAL (x)))
4145 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
4146 else if (const_ok_for_arm (-INTVAL (x)))
4148 if (outer_code == COMPARE || outer_code == PLUS
4149 || outer_code == MINUS)
4150 *total = 0;
4151 else
4152 *total = COSTS_N_INSNS (1);
4154 else
4155 *total = COSTS_N_INSNS (2);
4156 return true;
4158 case CONST:
4159 case LABEL_REF:
4160 case SYMBOL_REF:
4161 *total = COSTS_N_INSNS (2);
4162 return true;
4164 case CONST_DOUBLE:
4165 *total = COSTS_N_INSNS (4);
4166 return true;
4168 default:
4169 if (mode != VOIDmode)
4170 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4171 else
4172 *total = COSTS_N_INSNS (4); /* Who knows? */
4173 return false;
4177 /* RTX costs for cores with a slow MUL implementation. */
4179 static bool
4180 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4182 enum machine_mode mode = GET_MODE (x);
4184 if (TARGET_THUMB)
4186 *total = thumb_rtx_costs (x, code, outer_code);
4187 return true;
4190 switch (code)
4192 case MULT:
4193 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4194 || mode == DImode)
4196 *total = 30;
4197 return true;
4200 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4202 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4203 & (unsigned HOST_WIDE_INT) 0xffffffff);
4204 int cost, const_ok = const_ok_for_arm (i);
4205 int j, booth_unit_size;
4207 /* Tune as appropriate. */
4208 cost = const_ok ? 4 : 8;
4209 booth_unit_size = 2;
4210 for (j = 0; i && j < 32; j += booth_unit_size)
4212 i >>= booth_unit_size;
4213 cost += 2;
4216 *total = cost;
4217 return true;
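/* A worked example of the Booth walk above (illustrative): for
   XEXP (x, 1) == 0x55, const_ok_for_arm holds, so the cost starts
   at 4; four 2-bit steps are needed to shift the value to zero
   (0x55 -> 0x15 -> 0x5 -> 0x1 -> 0), adding 2 per step, giving
   *total = 4 + 8 = 12.  */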
4220 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4221 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4222 return true;
4224 default:
4225 *total = arm_rtx_costs_1 (x, code, outer_code);
4226 return true;
4231 /* RTX cost for cores with a fast multiply unit (M variants). */
4233 static bool
4234 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4236 enum machine_mode mode = GET_MODE (x);
4238 if (TARGET_THUMB)
4240 *total = thumb_rtx_costs (x, code, outer_code);
4241 return true;
4244 switch (code)
4246 case MULT:
4247 /* There is no point basing this on the tuning, since it is always the
4248 fast variant if it exists at all. */
4249 if (mode == DImode
4250 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4251 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4252 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4254 *total = 8;
4255 return true;
4259 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4260 || mode == DImode)
4262 *total = 30;
4263 return true;
4266 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4268 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4269 & (unsigned HOST_WIDE_INT) 0xffffffff);
4270 int cost, const_ok = const_ok_for_arm (i);
4271 int j, booth_unit_size;
4273 /* Tune as appropriate. */
4274 cost = const_ok ? 4 : 8;
4275 booth_unit_size = 8;
4276 for (j = 0; i && j < 32; j += booth_unit_size)
4278 i >>= booth_unit_size;
4279 cost += 2;
4282 *total = cost;
4283 return true;
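/* The same walk with the 8-bit Booth unit (illustrative): for
   XEXP (x, 1) == 0x12345678, const_ok_for_arm fails so the cost
   starts at 8, and four 8-bit steps are needed to shift the value
   to zero, giving *total = 8 + 8 = 16.  */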
4286 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4287 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4288 return true;
4290 default:
4291 *total = arm_rtx_costs_1 (x, code, outer_code);
4292 return true;
4297 /* RTX cost for XScale CPUs. */
4299 static bool
4300 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4302 enum machine_mode mode = GET_MODE (x);
4304 if (TARGET_THUMB)
4306 *total = thumb_rtx_costs (x, code, outer_code);
4307 return true;
4310 switch (code)
4312 case MULT:
4313 /* There is no point basing this on the tuning, since it is always the
4314 fast variant if it exists at all. */
4315 if (mode == DImode
4316 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4317 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4318 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4320 *total = 8;
4321 return true;
4325 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4326 || mode == DImode)
4328 *total = 30;
4329 return true;
4332 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4334 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4335 & (unsigned HOST_WIDE_INT) 0xffffffff);
4336 int cost, const_ok = const_ok_for_arm (i);
4337 unsigned HOST_WIDE_INT masked_const;
4339 /* The cost will be related to two insns.
4340 First a load of the constant (MOV or LDR), then a multiply. */
4341 cost = 2;
4342 if (! const_ok)
4343 cost += 1; /* LDR is probably more expensive because
4344 of longer result latency. */
4345 masked_const = i & 0xffff8000;
4346 if (masked_const != 0 && masked_const != 0xffff8000)
4348 masked_const = i & 0xf8000000;
4349 if (masked_const == 0 || masked_const == 0xf8000000)
4350 cost += 1;
4351 else
4352 cost += 2;
4354 *total = cost;
4355 return true;
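/* Worked examples (illustrative): a multiplier of 100 leaves
   masked_const zero, so *total = 2; 0x10000 is a valid immediate
   but has bits above bit 15, so *total = 2 + 1 = 3; 0x12345678
   needs an LDR (+1) and has bits in 27..31 that are neither all
   clear nor all set, so *total = 2 + 1 + 2 = 5.  */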
4358 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4359 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4360 return true;
4362 default:
4363 *total = arm_rtx_costs_1 (x, code, outer_code);
4364 return true;
4369 /* RTX costs for 9e (and later) cores. */
4371 static bool
4372 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4374 enum machine_mode mode = GET_MODE (x);
4375 int nonreg_cost;
4376 int cost;
4378 if (TARGET_THUMB)
4380 switch (code)
4382 case MULT:
4383 *total = COSTS_N_INSNS (3);
4384 return true;
4386 default:
4387 *total = thumb_rtx_costs (x, code, outer_code);
4388 return true;
4392 switch (code)
4394 case MULT:
4395 /* There is no point basing this on the tuning, since it is always the
4396 fast variant if it exists at all. */
4397 if (mode == DImode
4398 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4399 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4400 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4402 *total = 3;
4403 return true;
4407 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4409 *total = 30;
4410 return true;
4412 if (mode == DImode)
4414 cost = 7;
4415 nonreg_cost = 8;
4417 else
4419 cost = 2;
4420 nonreg_cost = 4;
4424 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4425 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4426 return true;
4428 default:
4429 *total = arm_rtx_costs_1 (x, code, outer_code);
4430 return true;
4433 /* All address computations that can be done are free, but rtx cost returns
4434 the same for practically all of them. So we weight the different types
4435 of address here in the order (most pref first):
4436 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4437 static inline int
4438 arm_arm_address_cost (rtx x)
4440 enum rtx_code c = GET_CODE (x);
4442 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4443 return 0;
4444 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4445 return 10;
4447 if (c == PLUS || c == MINUS)
4449 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4450 return 2;
4452 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4453 return 3;
4455 return 4;
4458 return 6;
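/* Illustrative costs under this weighting (my examples, in ARM
   assembler notation):
     [r3], #4 (POST_INC)       -> 0
     [r3, r4, lsl #2]          -> 3
     [r3, #8]                  -> 4
     [r3]                      -> 6
     a MEM, LABEL_REF or SYMBOL_REF address -> 10.  */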
4461 static inline int
4462 arm_thumb_address_cost (rtx x)
4464 enum rtx_code c = GET_CODE (x);
4466 if (c == REG)
4467 return 1;
4468 if (c == PLUS
4469 && GET_CODE (XEXP (x, 0)) == REG
4470 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4471 return 1;
4473 return 2;
4476 static int
4477 arm_address_cost (rtx x)
4479 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4482 static int
4483 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4485 rtx i_pat, d_pat;
4487 /* Some true dependencies can have a higher cost depending
4488 on precisely how certain input operands are used. */
4489 if (arm_tune_xscale
4490 && REG_NOTE_KIND (link) == 0
4491 && recog_memoized (insn) >= 0
4492 && recog_memoized (dep) >= 0)
4494 int shift_opnum = get_attr_shift (insn);
4495 enum attr_type attr_type = get_attr_type (dep);
4497 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4498 operand for INSN. If we have a shifted input operand and the
4499 instruction we depend on is another ALU instruction, then we may
4500 have to account for an additional stall. */
4501 if (shift_opnum != 0
4502 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4504 rtx shifted_operand;
4505 int opno;
4507 /* Get the shifted operand. */
4508 extract_insn (insn);
4509 shifted_operand = recog_data.operand[shift_opnum];
4511 /* Iterate over all the operands in DEP. If we write an operand
4512 that overlaps with SHIFTED_OPERAND, then we have to increase the
4513 cost of this dependency. */
4514 extract_insn (dep);
4515 preprocess_constraints ();
4516 for (opno = 0; opno < recog_data.n_operands; opno++)
4518 /* We can ignore strict inputs. */
4519 if (recog_data.operand_type[opno] == OP_IN)
4520 continue;
4522 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4523 shifted_operand))
4524 return 2;
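/* A hypothetical XScale example: if DEP is "add r1, r2, r3" and
   INSN is "add r0, r0, r1, lsl #2", then r1 is INSN's shifted
   operand and is written by DEP, so the dependency is charged 2
   cycles rather than the default.  */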
4529 /* XXX This is not strictly true for the FPA. */
4530 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4531 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4532 return 0;
4534 /* Call insns don't incur a stall, even if they follow a load. */
4535 if (REG_NOTE_KIND (link) == 0
4536 && GET_CODE (insn) == CALL_INSN)
4537 return 1;
4539 if ((i_pat = single_set (insn)) != NULL
4540 && GET_CODE (SET_SRC (i_pat)) == MEM
4541 && (d_pat = single_set (dep)) != NULL
4542 && GET_CODE (SET_DEST (d_pat)) == MEM)
4544 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4545 /* This is a load after a store, there is no conflict if the load reads
4546 from a cached area. Assume that loads from the stack, and from the
4547 constant pool are cached, and that others will miss. This is a
4548 hack. */
4550 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4551 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4552 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4553 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4554 return 1;
4557 return cost;
4560 static int fp_consts_inited = 0;
4562 /* Only zero is a valid immediate for VFP; the other values are valid for FPA only. */
4563 static const char * const strings_fp[8] =
4565 "0", "1", "2", "3",
4566 "4", "5", "0.5", "10"
4569 static REAL_VALUE_TYPE values_fp[8];
4571 static void
4572 init_fp_table (void)
4574 int i;
4575 REAL_VALUE_TYPE r;
4577 if (TARGET_VFP)
4578 fp_consts_inited = 1;
4579 else
4580 fp_consts_inited = 8;
4582 for (i = 0; i < fp_consts_inited; i++)
4584 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4585 values_fp[i] = r;
4589 /* Return TRUE if rtx X is a valid immediate FP constant. */
4591 arm_const_double_rtx (rtx x)
4593 REAL_VALUE_TYPE r;
4594 int i;
4596 if (!fp_consts_inited)
4597 init_fp_table ();
4599 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4600 if (REAL_VALUE_MINUS_ZERO (r))
4601 return 0;
4603 for (i = 0; i < fp_consts_inited; i++)
4604 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4605 return 1;
4607 return 0;
4610 /* Return TRUE if rtx X, when negated, is a valid immediate FPA constant. */
4612 neg_const_double_rtx_ok_for_fpa (rtx x)
4614 REAL_VALUE_TYPE r;
4615 int i;
4617 if (!fp_consts_inited)
4618 init_fp_table ();
4620 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4621 r = REAL_VALUE_NEGATE (r);
4622 if (REAL_VALUE_MINUS_ZERO (r))
4623 return 0;
4625 for (i = 0; i < 8; i++)
4626 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4627 return 1;
4629 return 0;
4632 /* Predicates for `match_operand' and `match_operator'. */
4634 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4636 cirrus_memory_offset (rtx op)
4638 /* Reject eliminable registers. */
4639 if (! (reload_in_progress || reload_completed)
4640 && ( reg_mentioned_p (frame_pointer_rtx, op)
4641 || reg_mentioned_p (arg_pointer_rtx, op)
4642 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4643 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4644 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4645 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4646 return 0;
4648 if (GET_CODE (op) == MEM)
4650 rtx ind;
4652 ind = XEXP (op, 0);
4654 /* Match: (mem (reg)). */
4655 if (GET_CODE (ind) == REG)
4656 return 1;
4658 /* Match:
4659 (mem (plus (reg)
4660 (const))). */
4661 if (GET_CODE (ind) == PLUS
4662 && GET_CODE (XEXP (ind, 0)) == REG
4663 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4664 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4665 return 1;
4668 return 0;
4671 /* Return TRUE if OP is a valid VFP memory address pattern.
4672 WB is true if writeback address modes are allowed. */
4675 arm_coproc_mem_operand (rtx op, bool wb)
4677 rtx ind;
4679 /* Reject eliminable registers. */
4680 if (! (reload_in_progress || reload_completed)
4681 && ( reg_mentioned_p (frame_pointer_rtx, op)
4682 || reg_mentioned_p (arg_pointer_rtx, op)
4683 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4684 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4685 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4686 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4687 return FALSE;
4689 /* Constants are converted into offsets from labels. */
4690 if (GET_CODE (op) != MEM)
4691 return FALSE;
4693 ind = XEXP (op, 0);
4695 if (reload_completed
4696 && (GET_CODE (ind) == LABEL_REF
4697 || (GET_CODE (ind) == CONST
4698 && GET_CODE (XEXP (ind, 0)) == PLUS
4699 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4700 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4701 return TRUE;
4703 /* Match: (mem (reg)). */
4704 if (GET_CODE (ind) == REG)
4705 return arm_address_register_rtx_p (ind, 0);
4707 /* Autoincrement addressing modes. */
4708 if (wb
4709 && (GET_CODE (ind) == PRE_INC
4710 || GET_CODE (ind) == POST_INC
4711 || GET_CODE (ind) == PRE_DEC
4712 || GET_CODE (ind) == POST_DEC))
4713 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4715 if (wb
4716 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4717 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4718 && GET_CODE (XEXP (ind, 1)) == PLUS
4719 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4720 ind = XEXP (ind, 1);
4722 /* Match:
4723 (plus (reg)
4724 (const)). */
4725 if (GET_CODE (ind) == PLUS
4726 && GET_CODE (XEXP (ind, 0)) == REG
4727 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4728 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4729 && INTVAL (XEXP (ind, 1)) > -1024
4730 && INTVAL (XEXP (ind, 1)) < 1024
4731 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4732 return TRUE;
4734 return FALSE;
4738 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4739 VFP registers. Otherwise return NO_REGS. */
4741 enum reg_class
4742 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4744 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4745 return NO_REGS;
4747 return GENERAL_REGS;
4751 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4752 Used by the Cirrus Maverick code, which has to work around
4753 a hardware bug triggered by such instructions. */
4754 static bool
4755 arm_memory_load_p (rtx insn)
4757 rtx body, lhs, rhs;
4759 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4760 return false;
4762 body = PATTERN (insn);
4764 if (GET_CODE (body) != SET)
4765 return false;
4767 lhs = XEXP (body, 0);
4768 rhs = XEXP (body, 1);
4770 lhs = REG_OR_SUBREG_RTX (lhs);
4772 /* If the destination is not a general purpose
4773 register we do not have to worry. */
4774 if (GET_CODE (lhs) != REG
4775 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4776 return false;
4778 /* As well as loads from memory we also have to react
4779 to loads of invalid constants which will be turned
4780 into loads from the minipool. */
4781 return (GET_CODE (rhs) == MEM
4782 || GET_CODE (rhs) == SYMBOL_REF
4783 || note_invalid_constants (insn, -1, false));
4786 /* Return TRUE if INSN is a Cirrus instruction. */
4787 static bool
4788 arm_cirrus_insn_p (rtx insn)
4790 enum attr_cirrus attr;
4792 /* get_attr aborts on USE and CLOBBER. */
4793 if (!insn
4794 || GET_CODE (insn) != INSN
4795 || GET_CODE (PATTERN (insn)) == USE
4796 || GET_CODE (PATTERN (insn)) == CLOBBER)
4797 return 0;
4799 attr = get_attr_cirrus (insn);
4801 return attr != CIRRUS_NOT;
4804 /* Cirrus reorg for invalid instruction combinations. */
4805 static void
4806 cirrus_reorg (rtx first)
4808 enum attr_cirrus attr;
4809 rtx body = PATTERN (first);
4810 rtx t;
4811 int nops;
4813 /* Any branch must be followed by 2 non Cirrus instructions. */
4814 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4816 nops = 0;
4817 t = next_nonnote_insn (first);
4819 if (arm_cirrus_insn_p (t))
4820 ++ nops;
4822 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4823 ++ nops;
4825 while (nops --)
4826 emit_insn_after (gen_nop (), first);
4828 return;
4831 /* (float (blah)) is in parallel with a clobber. */
4832 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4833 body = XVECEXP (body, 0, 0);
4835 if (GET_CODE (body) == SET)
4837 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4839 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4840 be followed by a non Cirrus insn. */
4841 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4843 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4844 emit_insn_after (gen_nop (), first);
4846 return;
4848 else if (arm_memory_load_p (first))
4850 unsigned int arm_regno;
4852 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4853 ldr/cfmv64hr combination where the Rd field is the same
4854 in both instructions must be split with a non Cirrus
4855 insn. Example:
4857 ldr r0, blah
4859 cfmvsr mvf0, r0. */
4861 /* Get Arm register number for ldr insn. */
4862 if (GET_CODE (lhs) == REG)
4863 arm_regno = REGNO (lhs);
4864 else if (GET_CODE (rhs) == REG)
4865 arm_regno = REGNO (rhs);
4866 else
4867 abort ();
4869 /* Next insn. */
4870 first = next_nonnote_insn (first);
4872 if (! arm_cirrus_insn_p (first))
4873 return;
4875 body = PATTERN (first);
4877 /* (float (blah)) is in parallel with a clobber. */
4878 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4879 body = XVECEXP (body, 0, 0);
4881 if (GET_CODE (body) == FLOAT)
4882 body = XEXP (body, 0);
4884 if (get_attr_cirrus (first) == CIRRUS_MOVE
4885 && GET_CODE (XEXP (body, 1)) == REG
4886 && arm_regno == REGNO (XEXP (body, 1)))
4887 emit_insn_after (gen_nop (), first);
4889 return;
4893 /* get_attr aborts on USE and CLOBBER. */
4894 if (!first
4895 || GET_CODE (first) != INSN
4896 || GET_CODE (PATTERN (first)) == USE
4897 || GET_CODE (PATTERN (first)) == CLOBBER)
4898 return;
4900 attr = get_attr_cirrus (first);
4902 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4903 must be followed by a non-coprocessor instruction. */
4904 if (attr == CIRRUS_COMPARE)
4906 nops = 0;
4908 t = next_nonnote_insn (first);
4910 if (arm_cirrus_insn_p (t))
4911 ++ nops;
4913 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4914 ++ nops;
4916 while (nops --)
4917 emit_insn_after (gen_nop (), first);
4919 return;
4923 /* Return TRUE if X references a SYMBOL_REF. */
4925 symbol_mentioned_p (rtx x)
4927 const char * fmt;
4928 int i;
4930 if (GET_CODE (x) == SYMBOL_REF)
4931 return 1;
4933 fmt = GET_RTX_FORMAT (GET_CODE (x));
4935 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4937 if (fmt[i] == 'E')
4939 int j;
4941 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4942 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4943 return 1;
4945 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4946 return 1;
4949 return 0;
4952 /* Return TRUE if X references a LABEL_REF. */
4954 label_mentioned_p (rtx x)
4956 const char * fmt;
4957 int i;
4959 if (GET_CODE (x) == LABEL_REF)
4960 return 1;
4962 fmt = GET_RTX_FORMAT (GET_CODE (x));
4963 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4965 if (fmt[i] == 'E')
4967 int j;
4969 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4970 if (label_mentioned_p (XVECEXP (x, i, j)))
4971 return 1;
4973 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
4974 return 1;
4977 return 0;
4980 enum rtx_code
4981 minmax_code (rtx x)
4983 enum rtx_code code = GET_CODE (x);
4985 if (code == SMAX)
4986 return GE;
4987 else if (code == SMIN)
4988 return LE;
4989 else if (code == UMIN)
4990 return LEU;
4991 else if (code == UMAX)
4992 return GEU;
4994 abort ();
4997 /* Return 1 if memory locations are adjacent. */
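/* For example (illustrative): [r4, #4] and [r4, #8] are adjacent;
   [r4] and [r4, #8] are not (8 bytes apart); [r4] and [r5, #4] are
   not (different base registers).  */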
4999 adjacent_mem_locations (rtx a, rtx b)
5001 if ((GET_CODE (XEXP (a, 0)) == REG
5002 || (GET_CODE (XEXP (a, 0)) == PLUS
5003 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5004 && (GET_CODE (XEXP (b, 0)) == REG
5005 || (GET_CODE (XEXP (b, 0)) == PLUS
5006 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5008 int val0 = 0, val1 = 0;
5009 int reg0, reg1;
5011 if (GET_CODE (XEXP (a, 0)) == PLUS)
5013 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5014 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5016 else
5017 reg0 = REGNO (XEXP (a, 0));
5019 if (GET_CODE (XEXP (b, 0)) == PLUS)
5021 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5022 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5024 else
5025 reg1 = REGNO (XEXP (b, 0));
5027 /* Don't accept any offset that will require multiple
5028 instructions to handle, since this would cause the
5029 arith_adjacentmem pattern to output an overlong sequence. */
5030 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5031 return 0;
5033 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5035 return 0;
5039 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5040 HOST_WIDE_INT *load_offset)
5042 int unsorted_regs[4];
5043 HOST_WIDE_INT unsorted_offsets[4];
5044 int order[4];
5045 int base_reg = -1;
5046 int i;
5048 /* Can only handle 2, 3, or 4 insns at present,
5049 though could be easily extended if required. */
5050 if (nops < 2 || nops > 4)
5051 abort ();
5053 /* Loop over the operands and check that the memory references are
5054 suitable (i.e. immediate offsets from the same base register). At
5055 the same time, extract the target register, and the memory
5056 offsets. */
5057 for (i = 0; i < nops; i++)
5059 rtx reg;
5060 rtx offset;
5062 /* Convert a subreg of a mem into the mem itself. */
5063 if (GET_CODE (operands[nops + i]) == SUBREG)
5064 operands[nops + i] = alter_subreg (operands + (nops + i));
5066 if (GET_CODE (operands[nops + i]) != MEM)
5067 abort ();
5069 /* Don't reorder volatile memory references; it doesn't seem worth
5070 looking for the case where the order is ok anyway. */
5071 if (MEM_VOLATILE_P (operands[nops + i]))
5072 return 0;
5074 offset = const0_rtx;
5076 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5077 || (GET_CODE (reg) == SUBREG
5078 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5079 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5080 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5081 == REG)
5082 || (GET_CODE (reg) == SUBREG
5083 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5084 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5085 == CONST_INT)))
5087 if (i == 0)
5089 base_reg = REGNO (reg);
5090 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5091 ? REGNO (operands[i])
5092 : REGNO (SUBREG_REG (operands[i])));
5093 order[0] = 0;
5095 else
5097 if (base_reg != (int) REGNO (reg))
5098 /* Not addressed from the same base register. */
5099 return 0;
5101 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5102 ? REGNO (operands[i])
5103 : REGNO (SUBREG_REG (operands[i])));
5104 if (unsorted_regs[i] < unsorted_regs[order[0]])
5105 order[0] = i;
5108 /* If it isn't an integer register, or if it overwrites the
5109 base register but isn't the last insn in the list, then
5110 we can't do this. */
5111 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5112 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5113 return 0;
5115 unsorted_offsets[i] = INTVAL (offset);
5117 else
5118 /* Not a suitable memory address. */
5119 return 0;
5122 /* All the useful information has now been extracted from the
5123 operands into unsorted_regs and unsorted_offsets; additionally,
5124 order[0] has been set to the lowest numbered register in the
5125 list. Sort the registers into order, and check that the memory
5126 offsets are ascending and adjacent. */
5128 for (i = 1; i < nops; i++)
5130 int j;
5132 order[i] = order[i - 1];
5133 for (j = 0; j < nops; j++)
5134 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5135 && (order[i] == order[i - 1]
5136 || unsorted_regs[j] < unsorted_regs[order[i]]))
5137 order[i] = j;
5139 /* Have we found a suitable register? If not, one must be used more
5140 than once. */
5141 if (order[i] == order[i - 1])
5142 return 0;
5144 /* Are the memory addresses adjacent and ascending? */
5145 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5146 return 0;
5149 if (base)
5151 *base = base_reg;
5153 for (i = 0; i < nops; i++)
5154 regs[i] = unsorted_regs[order[i]];
5156 *load_offset = unsorted_offsets[order[0]];
5159 if (unsorted_offsets[order[0]] == 0)
5160 return 1; /* ldmia */
5162 if (unsorted_offsets[order[0]] == 4)
5163 return 2; /* ldmib */
5165 if (unsorted_offsets[order[nops - 1]] == 0)
5166 return 3; /* ldmda */
5168 if (unsorted_offsets[order[nops - 1]] == -4)
5169 return 4; /* ldmdb */
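/* For instance (illustrative), loading r0 and r1 from [r4] and
   [r4, #4] gives 1 (ldmia); from [r4, #4] and [r4, #8] gives 2
   (ldmib); from [r4, #-4] and [r4] gives 3 (ldmda); and from
   [r4, #-8] and [r4, #-4] gives 4 (ldmdb).  */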
5171 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5172 if the offset isn't small enough. The reason 2 ldrs are faster
5173 is because these ARMs are able to do more than one cache access
5174 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5175 whilst the ARM8 has a double bandwidth cache. This means that
5176 these cores can do both an instruction fetch and a data fetch in
5177 a single cycle, so the trick of calculating the address into a
5178 scratch register (one of the result regs) and then doing a load
5179 multiple actually becomes slower (and no smaller in code size).
5180 That is the transformation
5182 ldr rd1, [rbase + offset]
5183 ldr rd2, [rbase + offset + 4]
5185 to
5187 add rd1, rbase, offset
5188 ldmia rd1, {rd1, rd2}
5190 produces worse code -- '3 cycles + any stalls on rd2' instead of
5191 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5192 access per cycle, the first sequence could never complete in less
5193 than 6 cycles, whereas the ldm sequence would only take 5 and
5194 would make better use of sequential accesses if not hitting the
5195 cache.
5197 We cheat here and test 'arm_ld_sched' which we currently know to
5198 only be true for the ARM8, ARM9 and StrongARM. If this ever
5199 changes, then the test below needs to be reworked. */
5200 if (nops == 2 && arm_ld_sched)
5201 return 0;
5203 /* Can't do it without setting up the offset, only do this if it takes
5204 no more than one insn. */
5205 return (const_ok_for_arm (unsorted_offsets[order[0]])
5206 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5209 const char *
5210 emit_ldm_seq (rtx *operands, int nops)
5212 int regs[4];
5213 int base_reg;
5214 HOST_WIDE_INT offset;
5215 char buf[100];
5216 int i;
5218 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5220 case 1:
5221 strcpy (buf, "ldm%?ia\t");
5222 break;
5224 case 2:
5225 strcpy (buf, "ldm%?ib\t");
5226 break;
5228 case 3:
5229 strcpy (buf, "ldm%?da\t");
5230 break;
5232 case 4:
5233 strcpy (buf, "ldm%?db\t");
5234 break;
5236 case 5:
5237 if (offset >= 0)
5238 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5239 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5240 (long) offset);
5241 else
5242 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5243 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5244 (long) -offset);
5245 output_asm_insn (buf, operands);
5246 base_reg = regs[0];
5247 strcpy (buf, "ldm%?ia\t");
5248 break;
5250 default:
5251 abort ();
5254 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5255 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5257 for (i = 1; i < nops; i++)
5258 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5259 reg_names[regs[i]]);
5261 strcat (buf, "}\t%@ phole ldm");
5263 output_asm_insn (buf, operands);
5264 return "";
5268 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5269 HOST_WIDE_INT * load_offset)
5271 int unsorted_regs[4];
5272 HOST_WIDE_INT unsorted_offsets[4];
5273 int order[4];
5274 int base_reg = -1;
5275 int i;
5277 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5278 extended if required. */
5279 if (nops < 2 || nops > 4)
5280 abort ();
5282 /* Loop over the operands and check that the memory references are
5283 suitable (i.e. immediate offsets from the same base register). At
5284 the same time, extract the target register, and the memory
5285 offsets. */
5286 for (i = 0; i < nops; i++)
5288 rtx reg;
5289 rtx offset;
5291 /* Convert a subreg of a mem into the mem itself. */
5292 if (GET_CODE (operands[nops + i]) == SUBREG)
5293 operands[nops + i] = alter_subreg (operands + (nops + i));
5295 if (GET_CODE (operands[nops + i]) != MEM)
5296 abort ();
5298 /* Don't reorder volatile memory references; it doesn't seem worth
5299 looking for the case where the order is ok anyway. */
5300 if (MEM_VOLATILE_P (operands[nops + i]))
5301 return 0;
5303 offset = const0_rtx;
5305 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5306 || (GET_CODE (reg) == SUBREG
5307 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5308 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5309 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5310 == REG)
5311 || (GET_CODE (reg) == SUBREG
5312 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5313 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5314 == CONST_INT)))
5316 if (i == 0)
5318 base_reg = REGNO (reg);
5319 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5320 ? REGNO (operands[i])
5321 : REGNO (SUBREG_REG (operands[i])));
5322 order[0] = 0;
5324 else
5326 if (base_reg != (int) REGNO (reg))
5327 /* Not addressed from the same base register. */
5328 return 0;
5330 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5331 ? REGNO (operands[i])
5332 : REGNO (SUBREG_REG (operands[i])));
5333 if (unsorted_regs[i] < unsorted_regs[order[0]])
5334 order[0] = i;
5337 /* If it isn't an integer register, then we can't do this. */
5338 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5339 return 0;
5341 unsorted_offsets[i] = INTVAL (offset);
5343 else
5344 /* Not a suitable memory address. */
5345 return 0;
5348 /* All the useful information has now been extracted from the
5349 operands into unsorted_regs and unsorted_offsets; additionally,
5350 order[0] has been set to the lowest numbered register in the
5351 list. Sort the registers into order, and check that the memory
5352 offsets are ascending and adjacent. */
5354 for (i = 1; i < nops; i++)
5356 int j;
5358 order[i] = order[i - 1];
5359 for (j = 0; j < nops; j++)
5360 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5361 && (order[i] == order[i - 1]
5362 || unsorted_regs[j] < unsorted_regs[order[i]]))
5363 order[i] = j;
5365 /* Have we found a suitable register? If not, one must be used more
5366 than once. */
5367 if (order[i] == order[i - 1])
5368 return 0;
5370 /* Are the memory addresses adjacent and ascending? */
5371 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5372 return 0;
5375 if (base)
5377 *base = base_reg;
5379 for (i = 0; i < nops; i++)
5380 regs[i] = unsorted_regs[order[i]];
5382 *load_offset = unsorted_offsets[order[0]];
5385 if (unsorted_offsets[order[0]] == 0)
5386 return 1; /* stmia */
5388 if (unsorted_offsets[order[0]] == 4)
5389 return 2; /* stmib */
5391 if (unsorted_offsets[order[nops - 1]] == 0)
5392 return 3; /* stmda */
5394 if (unsorted_offsets[order[nops - 1]] == -4)
5395 return 4; /* stmdb */
5397 return 0;
5400 const char *
5401 emit_stm_seq (rtx *operands, int nops)
5403 int regs[4];
5404 int base_reg;
5405 HOST_WIDE_INT offset;
5406 char buf[100];
5407 int i;
5409 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5411 case 1:
5412 strcpy (buf, "stm%?ia\t");
5413 break;
5415 case 2:
5416 strcpy (buf, "stm%?ib\t");
5417 break;
5419 case 3:
5420 strcpy (buf, "stm%?da\t");
5421 break;
5423 case 4:
5424 strcpy (buf, "stm%?db\t");
5425 break;
5427 default:
5428 abort ();
5431 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5432 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5434 for (i = 1; i < nops; i++)
5435 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5436 reg_names[regs[i]]);
5438 strcat (buf, "}\t%@ phole stm");
5440 output_asm_insn (buf, operands);
5441 return "";
5445 /* Routines for use in generating RTL. */
5448 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5449 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5451 HOST_WIDE_INT offset = *offsetp;
5452 int i = 0, j;
5453 rtx result;
5454 int sign = up ? 1 : -1;
5455 rtx mem, addr;
5457 /* XScale has load-store double instructions, but they have stricter
5458 alignment requirements than load-store multiple, so we cannot
5459 use them.
5461 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5462 the pipeline until completion.
5464 NREGS CYCLES
5465 1 3
5466 2 4
5467 3 5
5468 4 6
5470 An ldr instruction takes 1-3 cycles, but does not block the
5471 pipeline.
5473 NREGS CYCLES
5474 1 1-3
5475 2 2-6
5476 3 3-9
5477 4 4-12
5479 Best case ldr will always win. However, the more ldr instructions
5480 we issue, the less likely we are to be able to schedule them well.
5481 Using ldr instructions also increases code size.
5483 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5484 for counts of 3 or 4 regs. */
5485 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5487 rtx seq;
5489 start_sequence ();
5491 for (i = 0; i < count; i++)
5493 addr = plus_constant (from, i * 4 * sign);
5494 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5495 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5496 offset += 4 * sign;
5499 if (write_back)
5501 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5502 *offsetp = offset;
5505 seq = get_insns ();
5506 end_sequence ();
5508 return seq;
5511 result = gen_rtx_PARALLEL (VOIDmode,
5512 rtvec_alloc (count + (write_back ? 1 : 0)));
5513 if (write_back)
5515 XVECEXP (result, 0, 0)
5516 = gen_rtx_SET (GET_MODE (from), from,
5517 plus_constant (from, count * 4 * sign));
5518 i = 1;
5519 count++;
5522 for (j = 0; i < count; i++, j++)
5524 addr = plus_constant (from, j * 4 * sign);
5525 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5526 XVECEXP (result, 0, i)
5527 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5528 offset += 4 * sign;
5531 if (write_back)
5532 *offsetp = offset;
5534 return result;
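/* Sketch of the RTL built above for base_regno == 4, count == 2, UP
   and WRITE_BACK set (my illustration):
     (parallel [(set from (plus from (const_int 8)))
                (set (reg:SI 4) (mem:SI from))
                (set (reg:SI 5) (mem:SI (plus from (const_int 4))))])
   which is the shape recognized by the load-multiple patterns.  */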
5538 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5539 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
5541 HOST_WIDE_INT offset = *offsetp;
5542 int i = 0, j;
5543 rtx result;
5544 int sign = up ? 1 : -1;
5545 rtx mem, addr;
5547 /* See arm_gen_load_multiple for discussion of
5548 the pros/cons of ldm/stm usage for XScale. */
5549 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5551 rtx seq;
5553 start_sequence ();
5555 for (i = 0; i < count; i++)
5557 addr = plus_constant (to, i * 4 * sign);
5558 mem = adjust_automodify_address (basemem, SImode, addr, offset);
5559 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5560 offset += 4 * sign;
5563 if (write_back)
5565 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5566 *offsetp = offset;
5569 seq = get_insns ();
5570 end_sequence ();
5572 return seq;
5575 result = gen_rtx_PARALLEL (VOIDmode,
5576 rtvec_alloc (count + (write_back ? 1 : 0)));
5577 if (write_back)
5579 XVECEXP (result, 0, 0)
5580 = gen_rtx_SET (GET_MODE (to), to,
5581 plus_constant (to, count * 4 * sign));
5582 i = 1;
5583 count++;
5586 for (j = 0; i < count; i++, j++)
5588 addr = plus_constant (to, j * 4 * sign);
5589 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
5590 XVECEXP (result, 0, i)
5591 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5592 offset += 4 * sign;
5595 if (write_back)
5596 *offsetp = offset;
5598 return result;
5602 arm_gen_movmemqi (rtx *operands)
5604 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5605 HOST_WIDE_INT srcoffset, dstoffset;
5606 int i;
5607 rtx src, dst, srcbase, dstbase;
5608 rtx part_bytes_reg = NULL;
5609 rtx mem;
5611 if (GET_CODE (operands[2]) != CONST_INT
5612 || GET_CODE (operands[3]) != CONST_INT
5613 || INTVAL (operands[2]) > 64
5614 || INTVAL (operands[3]) & 3)
5615 return 0;
5617 dstbase = operands[0];
5618 srcbase = operands[1];
5620 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
5621 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
5623 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5624 out_words_to_go = INTVAL (operands[2]) / 4;
5625 last_bytes = INTVAL (operands[2]) & 3;
5626 dstoffset = srcoffset = 0;
5628 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5629 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5631 for (i = 0; in_words_to_go >= 2; i+=4)
5633 if (in_words_to_go > 4)
5634 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5635 srcbase, &srcoffset));
5636 else
5637 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5638 FALSE, srcbase, &srcoffset));
5640 if (out_words_to_go)
5642 if (out_words_to_go > 4)
5643 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5644 dstbase, &dstoffset));
5645 else if (out_words_to_go != 1)
5646 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5647 dst, TRUE,
5648 (last_bytes == 0
5649 ? FALSE : TRUE),
5650 dstbase, &dstoffset));
5651 else
5653 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5654 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5655 if (last_bytes != 0)
5657 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5658 dstoffset += 4;
5663 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5664 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5667 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5668 if (out_words_to_go)
5670 rtx sreg;
5672 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5673 sreg = copy_to_reg (mem);
5675 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
5676 emit_move_insn (mem, sreg);
5677 in_words_to_go--;
5679 if (in_words_to_go) /* Sanity check */
5680 abort ();
5683 if (in_words_to_go)
5685 if (in_words_to_go < 0)
5686 abort ();
5688 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
5689 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5692 if (last_bytes && part_bytes_reg == NULL)
5693 abort ();
5695 if (BYTES_BIG_ENDIAN && last_bytes)
5697 rtx tmp = gen_reg_rtx (SImode);
5699 /* The bytes we want are in the top end of the word. */
5700 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5701 GEN_INT (8 * (4 - last_bytes))));
5702 part_bytes_reg = tmp;
5704 while (last_bytes)
5706 mem = adjust_automodify_address (dstbase, QImode,
5707 plus_constant (dst, last_bytes - 1),
5708 dstoffset + last_bytes - 1);
5709 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5711 if (--last_bytes)
5713 tmp = gen_reg_rtx (SImode);
5714 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5715 part_bytes_reg = tmp;
5720 else
5722 if (last_bytes > 1)
5724 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
5725 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5726 last_bytes -= 2;
5727 if (last_bytes)
5729 rtx tmp = gen_reg_rtx (SImode);
5730 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5731 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5732 part_bytes_reg = tmp;
5733 dstoffset += 2;
5737 if (last_bytes)
5739 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
5740 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5744 return 1;
5747 /* Generate a memory reference for a half word, such that it will be loaded
5748 into the top 16 bits of the word. We can assume that the address is
5749 known to be alignable and of the form reg, or plus (reg, const). */
5752 arm_gen_rotated_half_load (rtx memref)
5754 HOST_WIDE_INT offset = 0;
5755 rtx base = XEXP (memref, 0);
5757 if (GET_CODE (base) == PLUS)
5759 offset = INTVAL (XEXP (base, 1));
5760 base = XEXP (base, 0);
5763 /* If we aren't allowed to generate unaligned addresses, then fail. */
5764 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
5765 return NULL;
5767 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5769 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5770 return base;
5772 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5775 /* Select a dominance comparison mode if possible for a test of the general
5776 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5777 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5778 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5779 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5780 In all cases OP will be either EQ or NE, but we don't need to know which
5781 here. If we are unable to support a dominance comparison we return
5782 CC mode. This will then fail to match for the RTL expressions that
5783 generate this call. */
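/* For example (illustrative): for X == (eq r0 r1), Y == (ge r0 r1)
   and COND_OR == DOM_CC_X_OR_Y, EQ implies GE, so the pair can be
   tested with a single compare and we return CC_DGEmode.  */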
5784 enum machine_mode
5785 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5787 enum rtx_code cond1, cond2;
5788 int swapped = 0;
5790 /* Currently we will probably get the wrong result if the individual
5791 comparisons are not simple. This also ensures that it is safe to
5792 reverse a comparison if necessary. */
5793 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5794 != CCmode)
5795 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5796 != CCmode))
5797 return CCmode;
5799 /* The if_then_else variant of this tests the second condition if the
5800 first passes, but is true if the first fails. Reverse the first
5801 condition to get a true "inclusive-or" expression. */
5802 if (cond_or == DOM_CC_NX_OR_Y)
5803 cond1 = reverse_condition (cond1);
5805 /* If the comparisons are not equal, and one doesn't dominate the other,
5806 then we can't do this. */
5807 if (cond1 != cond2
5808 && !comparison_dominates_p (cond1, cond2)
5809 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5810 return CCmode;
5812 if (swapped)
5814 enum rtx_code temp = cond1;
5815 cond1 = cond2;
5816 cond2 = temp;
5819 switch (cond1)
5821 case EQ:
5822 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5823 return CC_DEQmode;
5825 switch (cond2)
5827 case LE: return CC_DLEmode;
5828 case LEU: return CC_DLEUmode;
5829 case GE: return CC_DGEmode;
5830 case GEU: return CC_DGEUmode;
5831 default: break;
5834 break;
5836 case LT:
5837 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5838 return CC_DLTmode;
5839 if (cond2 == LE)
5840 return CC_DLEmode;
5841 if (cond2 == NE)
5842 return CC_DNEmode;
5843 break;
5845 case GT:
5846 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5847 return CC_DGTmode;
5848 if (cond2 == GE)
5849 return CC_DGEmode;
5850 if (cond2 == NE)
5851 return CC_DNEmode;
5852 break;
5854 case LTU:
5855 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5856 return CC_DLTUmode;
5857 if (cond2 == LEU)
5858 return CC_DLEUmode;
5859 if (cond2 == NE)
5860 return CC_DNEmode;
5861 break;
5863 case GTU:
5864 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5865 return CC_DGTUmode;
5866 if (cond2 == GEU)
5867 return CC_DGEUmode;
5868 if (cond2 == NE)
5869 return CC_DNEmode;
5870 break;
5872 /* The remaining cases only occur when both comparisons are the
5873 same. */
5874 case NE:
5875 return CC_DNEmode;
5877 case LE:
5878 return CC_DLEmode;
5880 case GE:
5881 return CC_DGEmode;
5883 case LEU:
5884 return CC_DLEUmode;
5886 case GEU:
5887 return CC_DGEUmode;
5889 default:
5890 break;
5893 abort ();
5896 enum machine_mode
5897 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
5899 /* All floating point compares return CCFP if it is an equality
5900 comparison, and CCFPE otherwise. */
5901 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5903 switch (op)
5905 case EQ:
5906 case NE:
5907 case UNORDERED:
5908 case ORDERED:
5909 case UNLT:
5910 case UNLE:
5911 case UNGT:
5912 case UNGE:
5913 case UNEQ:
5914 case LTGT:
5915 return CCFPmode;
5917 case LT:
5918 case LE:
5919 case GT:
5920 case GE:
5921 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
5922 return CCFPmode;
5923 return CCFPEmode;
5925 default:
5926 abort ();
5930 /* A compare with a shifted operand. Because of canonicalization, the
5931 comparison will have to be swapped when we emit the assembler. */
5932 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
5933 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5934 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
5935 || GET_CODE (x) == ROTATERT))
5936 return CC_SWPmode;
5938 /* This is a special case that is used by combine to allow a
5939 comparison of a shifted byte load to be split into a zero-extend
5940 followed by a comparison of the shifted integer (only valid for
5941 equalities and unsigned inequalities). */
5942 if (GET_MODE (x) == SImode
5943 && GET_CODE (x) == ASHIFT
5944 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
5945 && GET_CODE (XEXP (x, 0)) == SUBREG
5946 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
5947 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
5948 && (op == EQ || op == NE
5949 || op == GEU || op == GTU || op == LTU || op == LEU)
5950 && GET_CODE (y) == CONST_INT)
5951 return CC_Zmode;
5953 /* A construct for a conditional compare, if the false arm contains
5954 0, then both conditions must be true, otherwise either condition
5955 must be true. Not all conditions are possible, so CCmode is
5956 returned if it can't be done. */
5957 if (GET_CODE (x) == IF_THEN_ELSE
5958 && (XEXP (x, 2) == const0_rtx
5959 || XEXP (x, 2) == const1_rtx)
5960 && COMPARISON_P (XEXP (x, 0))
5961 && COMPARISON_P (XEXP (x, 1)))
5962 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5963 INTVAL (XEXP (x, 2)));
5965 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
5966 if (GET_CODE (x) == AND
5967 && COMPARISON_P (XEXP (x, 0))
5968 && COMPARISON_P (XEXP (x, 1)))
5969 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5970 DOM_CC_X_AND_Y);
5972 if (GET_CODE (x) == IOR
5973 && COMPARISON_P (XEXP (x, 0))
5974 && COMPARISON_P (XEXP (x, 1)))
5975 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5976 DOM_CC_X_OR_Y);
5978 /* An operation (on Thumb) where we want to test for a single bit.
5979 This is done by shifting that bit up into the top bit of a
5980 scratch register; we can then branch on the sign bit. */
5981 if (TARGET_THUMB
5982 && GET_MODE (x) == SImode
5983 && (op == EQ || op == NE)
5984 && (GET_CODE (x) == ZERO_EXTRACT))
5985 return CC_Nmode;
5987 /* For an operation that sets the condition codes as a side-effect, the
5988 V flag is not set correctly, so we can only use comparisons where
5989 this doesn't matter. (For LT and GE we can use "mi" and "pl"
5990 instead.) */
5991 if (GET_MODE (x) == SImode
5992 && y == const0_rtx
5993 && (op == EQ || op == NE || op == LT || op == GE)
5994 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
5995 || GET_CODE (x) == AND || GET_CODE (x) == IOR
5996 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
5997 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
5998 || GET_CODE (x) == LSHIFTRT
5999 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6000 || GET_CODE (x) == ROTATERT
6001 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6002 return CC_NOOVmode;
6004 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6005 return CC_Zmode;
6007 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6008 && GET_CODE (x) == PLUS
6009 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6010 return CC_Cmode;
6012 return CCmode;
6015 /* X and Y are two things to compare using CODE. Emit the compare insn and
6016 return the rtx for register 0 in the proper mode. There is no separate
6017 floating point case: SELECT_CC_MODE picks an FP mode when needed. */
6019 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6021 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6022 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6024 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6025 gen_rtx_COMPARE (mode, x, y)));
6027 return cc_reg;
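/* Typical use (a sketch, not from this file): a caller expanding a
   conditional branch on X >= Y might write
     rtx cc = arm_gen_compare_reg (GE, x, y);
     rtx cond = gen_rtx_GE (VOIDmode, cc, const0_rtx);
   and then place COND inside the IF_THEN_ELSE of a jump pattern.  */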
6030 /* Generate a sequence of insns that will generate the correct return
6031 address mask depending on the physical architecture that the program
6032 is running on. */
6034 arm_gen_return_addr_mask (void)
6036 rtx reg = gen_reg_rtx (Pmode);
6038 emit_insn (gen_return_addr_mask (reg));
6039 return reg;
6042 void
6043 arm_reload_in_hi (rtx *operands)
6045 rtx ref = operands[1];
6046 rtx base, scratch;
6047 HOST_WIDE_INT offset = 0;
6049 if (GET_CODE (ref) == SUBREG)
6051 offset = SUBREG_BYTE (ref);
6052 ref = SUBREG_REG (ref);
6055 if (GET_CODE (ref) == REG)
6057 /* We have a pseudo which has been spilt onto the stack; there
6058 are two cases here: the first where there is a simple
6059 stack-slot replacement and a second where the stack-slot is
6060 out of range, or is used as a subreg. */
6061 if (reg_equiv_mem[REGNO (ref)])
6063 ref = reg_equiv_mem[REGNO (ref)];
6064 base = find_replacement (&XEXP (ref, 0));
6066 else
6067 /* The slot is out of range, or was dressed up in a SUBREG. */
6068 base = reg_equiv_address[REGNO (ref)];
6070 else
6071 base = find_replacement (&XEXP (ref, 0));
6073 /* Handle the case where the address is too complex to be offset by 1. */
6074 if (GET_CODE (base) == MINUS
6075 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6077 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6079 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6080 base = base_plus;
6082 else if (GET_CODE (base) == PLUS)
6084 /* The addend must be CONST_INT, or we would have dealt with it above. */
6085 HOST_WIDE_INT hi, lo;
6087 offset += INTVAL (XEXP (base, 1));
6088 base = XEXP (base, 0);
6090 /* Rework the address into a legal sequence of insns. */
6091 /* Valid range for lo is -4095 -> 4095 */
6092 lo = (offset >= 0
6093 ? (offset & 0xfff)
6094 : -((-offset) & 0xfff));
6096 /* Corner case: if lo is the max offset then we would be out of range
6097 once we have added the additional 1 below, so bump the msb into the
6098 pre-loading insn(s). */
6099 if (lo == 4095)
6100 lo &= 0x7ff;
6102 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6103 ^ (HOST_WIDE_INT) 0x80000000)
6104 - (HOST_WIDE_INT) 0x80000000);
6106 if (hi + lo != offset)
6107 abort ();
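/* Worked example of the split above: offset = 4095 gives lo = 0xfff,
   which the corner case reduces to 0x7ff (2047); then
   hi = 4095 - 2047 = 2048, the xor/subtract pair merely sign-extending
   the 32-bit difference into a full HOST_WIDE_INT, and hi + lo
   reassembles the original offset. */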
6109 if (hi != 0)
6111 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6113 /* Get the base address; addsi3 knows how to handle constants
6114 that require more than one insn. */
6115 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6116 base = base_plus;
6117 offset = lo;
6121 /* Operands[2] may overlap operands[0] (though it won't overlap
6122 operands[1]), that's why we asked for a DImode reg -- so we can
6123 use the bit that does not overlap. */
6124 if (REGNO (operands[2]) == REGNO (operands[0]))
6125 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6126 else
6127 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6129 emit_insn (gen_zero_extendqisi2 (scratch,
6130 gen_rtx_MEM (QImode,
6131 plus_constant (base,
6132 offset))));
6133 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6134 gen_rtx_MEM (QImode,
6135 plus_constant (base,
6136 offset + 1))));
6137 if (!BYTES_BIG_ENDIAN)
6138 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6139 gen_rtx_IOR (SImode,
6140 gen_rtx_ASHIFT
6141 (SImode,
6142 gen_rtx_SUBREG (SImode, operands[0], 0),
6143 GEN_INT (8)),
6144 scratch)));
6145 else
6146 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6147 gen_rtx_IOR (SImode,
6148 gen_rtx_ASHIFT (SImode, scratch,
6149 GEN_INT (8)),
6150 gen_rtx_SUBREG (SImode, operands[0],
6151 0))));
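/* On a little-endian target the sequence just emitted amounts to
        ldrb    scratch, [base, #offset]
        ldrb    dest, [base, #offset + 1]
        orr     dest, scratch, dest, lsl #8
   i.e. the halfword is rebuilt as (high byte << 8) | low byte; the
   big-endian arm of the if swaps which byte gets shifted. */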
6154 /* Handle storing a half-word to memory during reload by synthesizing as two
6155 byte stores. Take care not to clobber the input values until after we
6156 have moved them somewhere safe. This code assumes that if the DImode
6157 scratch in operands[2] overlaps either the input value or output address
6158 in some way, then that value must die in this insn (we absolutely need
6159 two scratch registers for some corner cases). */
6160 void
6161 arm_reload_out_hi (rtx *operands)
6163 rtx ref = operands[0];
6164 rtx outval = operands[1];
6165 rtx base, scratch;
6166 HOST_WIDE_INT offset = 0;
6168 if (GET_CODE (ref) == SUBREG)
6170 offset = SUBREG_BYTE (ref);
6171 ref = SUBREG_REG (ref);
6174 if (GET_CODE (ref) == REG)
6176 /* We have a pseudo which has been spilt onto the stack; there
6177 are two cases here: the first where there is a simple
6178 stack-slot replacement and a second where the stack-slot is
6179 out of range, or is used as a subreg. */
6180 if (reg_equiv_mem[REGNO (ref)])
6182 ref = reg_equiv_mem[REGNO (ref)];
6183 base = find_replacement (&XEXP (ref, 0));
6185 else
6186 /* The slot is out of range, or was dressed up in a SUBREG. */
6187 base = reg_equiv_address[REGNO (ref)];
6189 else
6190 base = find_replacement (&XEXP (ref, 0));
6192 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6194 /* Handle the case where the address is too complex to be offset by 1. */
6195 if (GET_CODE (base) == MINUS
6196 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6198 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6200 /* Be careful not to destroy OUTVAL. */
6201 if (reg_overlap_mentioned_p (base_plus, outval))
6203 /* Updating base_plus might destroy outval; see if we can
6204 swap the scratch and base_plus. */
6205 if (!reg_overlap_mentioned_p (scratch, outval))
6207 rtx tmp = scratch;
6208 scratch = base_plus;
6209 base_plus = tmp;
6211 else
6213 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6215 /* Be conservative and copy OUTVAL into the scratch now;
6216 this should only be necessary if outval is a subreg
6217 of something larger than a word. */
6218 /* XXX Might this clobber base? I can't see how it can,
6219 since scratch is known to overlap with OUTVAL, and
6220 must be wider than a word. */
6221 emit_insn (gen_movhi (scratch_hi, outval));
6222 outval = scratch_hi;
6226 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6227 base = base_plus;
6229 else if (GET_CODE (base) == PLUS)
6231 /* The addend must be CONST_INT, or we would have dealt with it above. */
6232 HOST_WIDE_INT hi, lo;
6234 offset += INTVAL (XEXP (base, 1));
6235 base = XEXP (base, 0);
6237 /* Rework the address into a legal sequence of insns. */
6238 /* Valid range for lo is -4095 -> 4095 */
6239 lo = (offset >= 0
6240 ? (offset & 0xfff)
6241 : -((-offset) & 0xfff));
6243 /* Corner case: if lo is the max offset then we would be out of range
6244 once we have added the additional 1 below, so bump the msb into the
6245 pre-loading insn(s). */
6246 if (lo == 4095)
6247 lo &= 0x7ff;
6249 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6250 ^ (HOST_WIDE_INT) 0x80000000)
6251 - (HOST_WIDE_INT) 0x80000000);
6253 if (hi + lo != offset)
6254 abort ();
6256 if (hi != 0)
6258 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6260 /* Be careful not to destroy OUTVAL. */
6261 if (reg_overlap_mentioned_p (base_plus, outval))
6263 /* Updating base_plus might destroy outval; see if we
6264 can swap the scratch and base_plus. */
6265 if (!reg_overlap_mentioned_p (scratch, outval))
6267 rtx tmp = scratch;
6268 scratch = base_plus;
6269 base_plus = tmp;
6271 else
6273 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6275 /* Be conservative and copy outval into scratch now;
6276 this should only be necessary if outval is a
6277 subreg of something larger than a word. */
6278 /* XXX Might this clobber base? I can't see how it
6279 can, since scratch is known to overlap with
6280 outval. */
6281 emit_insn (gen_movhi (scratch_hi, outval));
6282 outval = scratch_hi;
6286 /* Get the base address; addsi3 knows how to handle constants
6287 that require more than one insn. */
6288 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6289 base = base_plus;
6290 offset = lo;
6294 if (BYTES_BIG_ENDIAN)
6296 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6297 plus_constant (base, offset + 1)),
6298 gen_lowpart (QImode, outval)));
6299 emit_insn (gen_lshrsi3 (scratch,
6300 gen_rtx_SUBREG (SImode, outval, 0),
6301 GEN_INT (8)));
6302 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6303 gen_lowpart (QImode, scratch)));
6305 else
6307 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6308 gen_lowpart (QImode, outval)));
6309 emit_insn (gen_lshrsi3 (scratch,
6310 gen_rtx_SUBREG (SImode, outval, 0),
6311 GEN_INT (8)));
6312 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6313 plus_constant (base, offset + 1)),
6314 gen_lowpart (QImode, scratch)));
6318 /* Print a symbolic form of X to the debug file, F. */
6319 static void
6320 arm_print_value (FILE *f, rtx x)
6322 switch (GET_CODE (x))
6324 case CONST_INT:
6325 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6326 return;
6328 case CONST_DOUBLE:
6329 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6330 return;
6332 case CONST_VECTOR:
6334 int i;
6336 fprintf (f, "<");
6337 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6339 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6340 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6341 fputc (',', f);
6343 fprintf (f, ">");
6345 return;
6347 case CONST_STRING:
6348 fprintf (f, "\"%s\"", XSTR (x, 0));
6349 return;
6351 case SYMBOL_REF:
6352 fprintf (f, "`%s'", XSTR (x, 0));
6353 return;
6355 case LABEL_REF:
6356 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6357 return;
6359 case CONST:
6360 arm_print_value (f, XEXP (x, 0));
6361 return;
6363 case PLUS:
6364 arm_print_value (f, XEXP (x, 0));
6365 fprintf (f, "+");
6366 arm_print_value (f, XEXP (x, 1));
6367 return;
6369 case PC:
6370 fprintf (f, "pc");
6371 return;
6373 default:
6374 fprintf (f, "????");
6375 return;
6379 /* Routines for manipulation of the constant pool. */
6381 /* Arm instructions cannot load a large constant directly into a
6382 register; they have to come from a pc relative load. The constant
6383 must therefore be placed in the addressable range of the pc
6384 relative load. Depending on the precise pc relative load
6385 instruction the range is somewhere between 256 bytes and 4k. This
6386 means that we often have to dump a constant inside a function, and
6387 generate code to branch around it.
6389 It is important to minimize this, since the branches will slow
6390 things down and make the code larger.
6392 Normally we can hide the table after an existing unconditional
6393 branch so that there is no interruption of the flow, but in the
6394 worst case the code looks like this:
6396 ldr rn, L1
6397 ...
6398 b L2
6399 align
6400 L1: .long value
6401 L2:
6402 ...
6404 ldr rn, L3
6405 ...
6406 b L4
6407 align
6408 L3: .long value
6409 L4:
6410 ...
6412 We fix this by performing a scan after scheduling, which notices
6413 which instructions need to have their operands fetched from the
6414 constant table and builds the table.
6416 The algorithm starts by building a table of all the constants that
6417 need fixing up and all the natural barriers in the function (places
6418 where a constant table can be dropped without breaking the flow).
6419 For each fixup we note how far the pc-relative replacement will be
6420 able to reach and the offset of the instruction into the function.
6422 Having built the table we then group the fixes together to form
6423 tables that are as large as possible (subject to addressing
6424 constraints) and emit each table of constants after the last
6425 barrier that is within range of all the instructions in the group.
6426 If a group does not contain a barrier, then we forcibly create one
6427 by inserting a jump instruction into the flow. Once the table has
6428 been inserted, the insns are then modified to reference the
6429 relevant entry in the pool.
6431 Possible enhancements to the algorithm (not implemented) are:
6433 1) For some processors and object formats, there may be benefit in
6434 aligning the pools to the start of cache lines; this alignment
6435 would need to be taken into account when calculating addressability
6436 of a pool. */
6438 /* These typedefs are located at the start of this file, so that
6439 they can be used in the prototypes there. This comment is to
6440 remind readers of that fact so that the following structures
6441 can be understood more easily.
6443 typedef struct minipool_node Mnode;
6444 typedef struct minipool_fixup Mfix; */
6446 struct minipool_node
6448 /* Doubly linked chain of entries. */
6449 Mnode * next;
6450 Mnode * prev;
6451 /* The maximum offset into the code that this entry can be placed. While
6452 pushing fixes for forward references, all entries are sorted in order
6453 of increasing max_address. */
6454 HOST_WIDE_INT max_address;
6455 /* Similarly for an entry inserted for a backwards ref. */
6456 HOST_WIDE_INT min_address;
6457 /* The number of fixes referencing this entry. This can become zero
6458 if we "unpush" an entry. In this case we ignore the entry when we
6459 come to emit the code. */
6460 int refcount;
6461 /* The offset from the start of the minipool. */
6462 HOST_WIDE_INT offset;
6463 /* The value in the table. */
6464 rtx value;
6465 /* The mode of value. */
6466 enum machine_mode mode;
6467 /* The size of the value. With iWMMXt enabled
6468 sizes > 4 also imply an alignment of 8 bytes. */
6469 int fix_size;
6472 struct minipool_fixup
6474 Mfix * next;
6475 rtx insn;
6476 HOST_WIDE_INT address;
6477 rtx * loc;
6478 enum machine_mode mode;
6479 int fix_size;
6480 rtx value;
6481 Mnode * minipool;
6482 HOST_WIDE_INT forwards;
6483 HOST_WIDE_INT backwards;
6486 /* Fixes less than a word need padding out to a word boundary. */
6487 #define MINIPOOL_FIX_SIZE(mode) \
6488 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
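/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   both evaluate to 4, while MINIPOOL_FIX_SIZE (DImode) stays at 8. */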
6490 static Mnode * minipool_vector_head;
6491 static Mnode * minipool_vector_tail;
6492 static rtx minipool_vector_label;
6494 /* The linked list of all minipool fixes required for this function. */
6495 Mfix * minipool_fix_head;
6496 Mfix * minipool_fix_tail;
6497 /* The fix entry for the current minipool, once it has been placed. */
6498 Mfix * minipool_barrier;
6500 /* Determines if INSN is the start of a jump table. Returns the end
6501 of the TABLE or NULL_RTX. */
6502 static rtx
6503 is_jump_table (rtx insn)
6505 rtx table;
6507 if (GET_CODE (insn) == JUMP_INSN
6508 && JUMP_LABEL (insn) != NULL
6509 && ((table = next_real_insn (JUMP_LABEL (insn)))
6510 == next_real_insn (insn))
6511 && table != NULL
6512 && GET_CODE (table) == JUMP_INSN
6513 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6514 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6515 return table;
6517 return NULL_RTX;
6520 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6521 #define JUMP_TABLES_IN_TEXT_SECTION 0
6522 #endif
6524 static HOST_WIDE_INT
6525 get_jump_table_size (rtx insn)
6527 /* ADDR_VECs only take room if read-only data goes into the text
6528 section. */
6529 if (JUMP_TABLES_IN_TEXT_SECTION
6530 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6531 || 1
6532 #endif
6535 rtx body = PATTERN (insn);
6536 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6538 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6541 return 0;
6544 /* Move a minipool fix MP from its current location to before MAX_MP.
6545 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6546 constraints may need updating. */
6547 static Mnode *
6548 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6549 HOST_WIDE_INT max_address)
6551 /* This should never be true and the code below assumes these are
6552 different. */
6553 if (mp == max_mp)
6554 abort ();
6556 if (max_mp == NULL)
6558 if (max_address < mp->max_address)
6559 mp->max_address = max_address;
6561 else
6563 if (max_address > max_mp->max_address - mp->fix_size)
6564 mp->max_address = max_mp->max_address - mp->fix_size;
6565 else
6566 mp->max_address = max_address;
6568 /* Unlink MP from its current position. Since max_mp is non-null,
6569 mp->prev must be non-null. */
6570 mp->prev->next = mp->next;
6571 if (mp->next != NULL)
6572 mp->next->prev = mp->prev;
6573 else
6574 minipool_vector_tail = mp->prev;
6576 /* Re-insert it before MAX_MP. */
6577 mp->next = max_mp;
6578 mp->prev = max_mp->prev;
6579 max_mp->prev = mp;
6581 if (mp->prev != NULL)
6582 mp->prev->next = mp;
6583 else
6584 minipool_vector_head = mp;
6587 /* Save the new entry. */
6588 max_mp = mp;
6590 /* Scan over the preceding entries and adjust their addresses as
6591 required. */
6592 while (mp->prev != NULL
6593 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6595 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6596 mp = mp->prev;
6599 return max_mp;
6602 /* Add a constant to the minipool for a forward reference. Returns the
6603 node added or NULL if the constant will not fit in this pool. */
6604 static Mnode *
6605 add_minipool_forward_ref (Mfix *fix)
6607 /* If set, max_mp is the first pool_entry that has a lower
6608 constraint than the one we are trying to add. */
6609 Mnode * max_mp = NULL;
6610 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6611 Mnode * mp;
6613 /* If this fix's address is greater than the address of the first
6614 entry, then we can't put the fix in this pool. We subtract the
6615 size of the current fix to ensure that if the table is fully
6616 packed we still have enough room to insert this value by shuffling
6617 the other fixes forwards. */
6618 if (minipool_vector_head &&
6619 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6620 return NULL;
6622 /* Scan the pool to see if a constant with the same value has
6623 already been added. While we are doing this, also note the
6624 location where we must insert the constant if it doesn't already
6625 exist. */
6626 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6628 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6629 && fix->mode == mp->mode
6630 && (GET_CODE (fix->value) != CODE_LABEL
6631 || (CODE_LABEL_NUMBER (fix->value)
6632 == CODE_LABEL_NUMBER (mp->value)))
6633 && rtx_equal_p (fix->value, mp->value))
6635 /* More than one fix references this entry. */
6636 mp->refcount++;
6637 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6640 /* Note the insertion point if necessary. */
6641 if (max_mp == NULL
6642 && mp->max_address > max_address)
6643 max_mp = mp;
6645 /* If we are inserting an 8-byte aligned quantity and
6646 we have not already found an insertion point, then
6647 make sure that all such 8-byte aligned quantities are
6648 placed at the start of the pool. */
6649 if (ARM_DOUBLEWORD_ALIGN
6650 && max_mp == NULL
6651 && fix->fix_size == 8
6652 && mp->fix_size != 8)
6654 max_mp = mp;
6655 max_address = mp->max_address;
6659 /* The value is not currently in the minipool, so we need to create
6660 a new entry for it. If MAX_MP is NULL, the entry will be put on
6661 the end of the list since the placement is less constrained than
6662 any existing entry. Otherwise, we insert the new fix before
6663 MAX_MP and, if necessary, adjust the constraints on the other
6664 entries. */
6665 mp = xmalloc (sizeof (* mp));
6666 mp->fix_size = fix->fix_size;
6667 mp->mode = fix->mode;
6668 mp->value = fix->value;
6669 mp->refcount = 1;
6670 /* Not yet required for a backwards ref. */
6671 mp->min_address = -65536;
6673 if (max_mp == NULL)
6675 mp->max_address = max_address;
6676 mp->next = NULL;
6677 mp->prev = minipool_vector_tail;
6679 if (mp->prev == NULL)
6681 minipool_vector_head = mp;
6682 minipool_vector_label = gen_label_rtx ();
6684 else
6685 mp->prev->next = mp;
6687 minipool_vector_tail = mp;
6689 else
6691 if (max_address > max_mp->max_address - mp->fix_size)
6692 mp->max_address = max_mp->max_address - mp->fix_size;
6693 else
6694 mp->max_address = max_address;
6696 mp->next = max_mp;
6697 mp->prev = max_mp->prev;
6698 max_mp->prev = mp;
6699 if (mp->prev != NULL)
6700 mp->prev->next = mp;
6701 else
6702 minipool_vector_head = mp;
6705 /* Save the new entry. */
6706 max_mp = mp;
6708 /* Scan over the preceding entries and adjust their addresses as
6709 required. */
6710 while (mp->prev != NULL
6711 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6713 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6714 mp = mp->prev;
6717 return max_mp;
6720 static Mnode *
6721 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6722 HOST_WIDE_INT min_address)
6724 HOST_WIDE_INT offset;
6726 /* This should never be true, and the code below assumes these are
6727 different. */
6728 if (mp == min_mp)
6729 abort ();
6731 if (min_mp == NULL)
6733 if (min_address > mp->min_address)
6734 mp->min_address = min_address;
6736 else
6738 /* We will adjust this below if it is too loose. */
6739 mp->min_address = min_address;
6741 /* Unlink MP from its current position. Since min_mp is non-null,
6742 mp->next must be non-null. */
6743 mp->next->prev = mp->prev;
6744 if (mp->prev != NULL)
6745 mp->prev->next = mp->next;
6746 else
6747 minipool_vector_head = mp->next;
6749 /* Reinsert it after MIN_MP. */
6750 mp->prev = min_mp;
6751 mp->next = min_mp->next;
6752 min_mp->next = mp;
6753 if (mp->next != NULL)
6754 mp->next->prev = mp;
6755 else
6756 minipool_vector_tail = mp;
6759 min_mp = mp;
6761 offset = 0;
6762 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6764 mp->offset = offset;
6765 if (mp->refcount > 0)
6766 offset += mp->fix_size;
6768 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6769 mp->next->min_address = mp->min_address + mp->fix_size;
6772 return min_mp;
6775 /* Add a constant to the minipool for a backward reference. Returns the
6776 node added or NULL if the constant will not fit in this pool.
6778 Note that the code for insertion for a backwards reference can be
6779 somewhat confusing because the calculated offsets for each fix do
6780 not take into account the size of the pool (which is still under
6781 construction). */
6782 static Mnode *
6783 add_minipool_backward_ref (Mfix *fix)
6785 /* If set, min_mp is the last pool_entry that has a lower constraint
6786 than the one we are trying to add. */
6787 Mnode *min_mp = NULL;
6788 /* This can be negative, since it is only a constraint. */
6789 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6790 Mnode *mp;
6792 /* If we can't reach the current pool from this insn, or if we can't
6793 insert this entry at the end of the pool without pushing other
6794 fixes out of range, then we don't try. This ensures that we
6795 can't fail later on. */
6796 if (min_address >= minipool_barrier->address
6797 || (minipool_vector_tail->min_address + fix->fix_size
6798 >= minipool_barrier->address))
6799 return NULL;
6801 /* Scan the pool to see if a constant with the same value has
6802 already been added. While we are doing this, also note the
6803 location where we must insert the constant if it doesn't already
6804 exist. */
6805 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6807 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6808 && fix->mode == mp->mode
6809 && (GET_CODE (fix->value) != CODE_LABEL
6810 || (CODE_LABEL_NUMBER (fix->value)
6811 == CODE_LABEL_NUMBER (mp->value)))
6812 && rtx_equal_p (fix->value, mp->value)
6813 /* Check that there is enough slack to move this entry to the
6814 end of the table (this is conservative). */
6815 && (mp->max_address
6816 > (minipool_barrier->address
6817 + minipool_vector_tail->offset
6818 + minipool_vector_tail->fix_size)))
6820 mp->refcount++;
6821 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6824 if (min_mp != NULL)
6825 mp->min_address += fix->fix_size;
6826 else
6828 /* Note the insertion point if necessary. */
6829 if (mp->min_address < min_address)
6831 /* For now, we do not allow the insertion of nodes that require
6832 8-byte alignment anywhere but at the start of the pool. */
6833 if (ARM_DOUBLEWORD_ALIGN
6834 && fix->fix_size == 8 && mp->fix_size != 8)
6835 return NULL;
6836 else
6837 min_mp = mp;
6839 else if (mp->max_address
6840 < minipool_barrier->address + mp->offset + fix->fix_size)
6842 /* Inserting before this entry would push the fix beyond
6843 its maximum address (which can happen if we have
6844 re-located a forwards fix); force the new fix to come
6845 after it. */
6846 min_mp = mp;
6847 min_address = mp->min_address + fix->fix_size;
6849 /* If we are inserting an 8-byte aligned quantity and
6850 we have not already found an insertion point, then
6851 make sure that all such 8-byte aligned quantities are
6852 placed at the start of the pool. */
6853 else if (ARM_DOUBLEWORD_ALIGN
6854 && min_mp == NULL
6855 && fix->fix_size == 8
6856 && mp->fix_size < 8)
6858 min_mp = mp;
6859 min_address = mp->min_address + fix->fix_size;
6864 /* We need to create a new entry. */
6865 mp = xmalloc (sizeof (* mp));
6866 mp->fix_size = fix->fix_size;
6867 mp->mode = fix->mode;
6868 mp->value = fix->value;
6869 mp->refcount = 1;
6870 mp->max_address = minipool_barrier->address + 65536;
6872 mp->min_address = min_address;
6874 if (min_mp == NULL)
6876 mp->prev = NULL;
6877 mp->next = minipool_vector_head;
6879 if (mp->next == NULL)
6881 minipool_vector_tail = mp;
6882 minipool_vector_label = gen_label_rtx ();
6884 else
6885 mp->next->prev = mp;
6887 minipool_vector_head = mp;
6889 else
6891 mp->next = min_mp->next;
6892 mp->prev = min_mp;
6893 min_mp->next = mp;
6895 if (mp->next != NULL)
6896 mp->next->prev = mp;
6897 else
6898 minipool_vector_tail = mp;
6901 /* Save the new entry. */
6902 min_mp = mp;
6904 if (mp->prev)
6905 mp = mp->prev;
6906 else
6907 mp->offset = 0;
6909 /* Scan over the following entries and adjust their offsets. */
6910 while (mp->next != NULL)
6912 if (mp->next->min_address < mp->min_address + mp->fix_size)
6913 mp->next->min_address = mp->min_address + mp->fix_size;
6915 if (mp->refcount)
6916 mp->next->offset = mp->offset + mp->fix_size;
6917 else
6918 mp->next->offset = mp->offset;
6920 mp = mp->next;
6923 return min_mp;
6926 static void
6927 assign_minipool_offsets (Mfix *barrier)
6929 HOST_WIDE_INT offset = 0;
6930 Mnode *mp;
6932 minipool_barrier = barrier;
6934 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6936 mp->offset = offset;
6938 if (mp->refcount > 0)
6939 offset += mp->fix_size;
6943 /* Output the literal table. */
6944 static void
6945 dump_minipool (rtx scan)
6947 Mnode * mp;
6948 Mnode * nmp;
6949 int align64 = 0;
6951 if (ARM_DOUBLEWORD_ALIGN)
6952 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6953 if (mp->refcount > 0 && mp->fix_size == 8)
6955 align64 = 1;
6956 break;
6959 if (dump_file)
6960 fprintf (dump_file,
6961 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
6962 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
6964 scan = emit_label_after (gen_label_rtx (), scan);
6965 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
6966 scan = emit_label_after (minipool_vector_label, scan);
6968 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
6970 if (mp->refcount > 0)
6972 if (dump_file)
6974 fprintf (dump_file,
6975 ";; Offset %u, min %ld, max %ld ",
6976 (unsigned) mp->offset, (unsigned long) mp->min_address,
6977 (unsigned long) mp->max_address);
6978 arm_print_value (dump_file, mp->value);
6979 fputc ('\n', dump_file);
6982 switch (mp->fix_size)
6984 #ifdef HAVE_consttable_1
6985 case 1:
6986 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
6987 break;
6989 #endif
6990 #ifdef HAVE_consttable_2
6991 case 2:
6992 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
6993 break;
6995 #endif
6996 #ifdef HAVE_consttable_4
6997 case 4:
6998 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
6999 break;
7001 #endif
7002 #ifdef HAVE_consttable_8
7003 case 8:
7004 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7005 break;
7007 #endif
7008 default:
7009 abort ();
7010 break;
7014 nmp = mp->next;
7015 free (mp);
7018 minipool_vector_head = minipool_vector_tail = NULL;
7019 scan = emit_insn_after (gen_consttable_end (), scan);
7020 scan = emit_barrier_after (scan);
7023 /* Return the cost of forcibly inserting a barrier after INSN. */
7024 static int
7025 arm_barrier_cost (rtx insn)
7027 /* Basing the location of the pool on the loop depth is preferable,
7028 but at the moment, the basic block information seems to be
7029 corrupt by this stage of the compilation. */
7030 int base_cost = 50;
7031 rtx next = next_nonnote_insn (insn);
7033 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7034 base_cost -= 20;
7036 switch (GET_CODE (insn))
7038 case CODE_LABEL:
7039 /* It will always be better to place the table before the label, rather
7040 than after it. */
7041 return 50;
7043 case INSN:
7044 case CALL_INSN:
7045 return base_cost;
7047 case JUMP_INSN:
7048 return base_cost - 10;
7050 default:
7051 return base_cost + 10;
7055 /* Find the best place in the insn stream in the range
7056 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7057 Create the barrier by inserting a jump and add a new fix entry for
7058 it. */
7059 static Mfix *
7060 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7062 HOST_WIDE_INT count = 0;
7063 rtx barrier;
7064 rtx from = fix->insn;
7065 rtx selected = from;
7066 int selected_cost;
7067 HOST_WIDE_INT selected_address;
7068 Mfix * new_fix;
7069 HOST_WIDE_INT max_count = max_address - fix->address;
7070 rtx label = gen_label_rtx ();
7072 selected_cost = arm_barrier_cost (from);
7073 selected_address = fix->address;
7075 while (from && count < max_count)
7077 rtx tmp;
7078 int new_cost;
7080 /* This code shouldn't have been called if there was a natural barrier
7081 within range. */
7082 if (GET_CODE (from) == BARRIER)
7083 abort ();
7085 /* Count the length of this insn. */
7086 count += get_attr_length (from);
7088 /* If there is a jump table, add its length. */
7089 tmp = is_jump_table (from);
7090 if (tmp != NULL)
7092 count += get_jump_table_size (tmp);
7094 /* Jump tables aren't in a basic block, so base the cost on
7095 the dispatch insn. If we select this location, we will
7096 still put the pool after the table. */
7097 new_cost = arm_barrier_cost (from);
7099 if (count < max_count && new_cost <= selected_cost)
7101 selected = tmp;
7102 selected_cost = new_cost;
7103 selected_address = fix->address + count;
7106 /* Continue after the dispatch table. */
7107 from = NEXT_INSN (tmp);
7108 continue;
7111 new_cost = arm_barrier_cost (from);
7113 if (count < max_count && new_cost <= selected_cost)
7115 selected = from;
7116 selected_cost = new_cost;
7117 selected_address = fix->address + count;
7120 from = NEXT_INSN (from);
7123 /* Create a new JUMP_INSN that branches around a barrier. */
7124 from = emit_jump_insn_after (gen_jump (label), selected);
7125 JUMP_LABEL (from) = label;
7126 barrier = emit_barrier_after (from);
7127 emit_label_after (label, barrier);
7129 /* Create a minipool barrier entry for the new barrier. */
7130 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7131 new_fix->insn = barrier;
7132 new_fix->address = selected_address;
7133 new_fix->next = fix->next;
7134 fix->next = new_fix;
7136 return new_fix;
7139 /* Record that there is a natural barrier in the insn stream at
7140 ADDRESS. */
7141 static void
7142 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7144 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7146 fix->insn = insn;
7147 fix->address = address;
7149 fix->next = NULL;
7150 if (minipool_fix_head != NULL)
7151 minipool_fix_tail->next = fix;
7152 else
7153 minipool_fix_head = fix;
7155 minipool_fix_tail = fix;
7158 /* Record INSN, which will need fixing up to load a value from the
7159 minipool. ADDRESS is the offset of the insn since the start of the
7160 function; LOC is a pointer to the part of the insn which requires
7161 fixing; VALUE is the constant that must be loaded, which is of type
7162 MODE. */
7163 static void
7164 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7165 enum machine_mode mode, rtx value)
7167 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7169 #ifdef AOF_ASSEMBLER
7170 /* PIC symbol references need to be converted into offsets into the
7171 based area. */
7172 /* XXX This shouldn't be done here. */
7173 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7174 value = aof_pic_entry (value);
7175 #endif /* AOF_ASSEMBLER */
7177 fix->insn = insn;
7178 fix->address = address;
7179 fix->loc = loc;
7180 fix->mode = mode;
7181 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7182 fix->value = value;
7183 fix->forwards = get_attr_pool_range (insn);
7184 fix->backwards = get_attr_neg_pool_range (insn);
7185 fix->minipool = NULL;
7187 /* If an insn doesn't have a range defined for it, then it isn't
7188 expecting to be reworked by this code. Better to abort now than
7189 to generate duff assembly code. */
7190 if (fix->forwards == 0 && fix->backwards == 0)
7191 abort ();
7193 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7194 So there might be an empty word before the start of the pool.
7195 Hence we reduce the forward range by 4 to allow for this
7196 possibility. */
7197 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7198 fix->forwards -= 4;
7200 if (dump_file)
7202 fprintf (dump_file,
7203 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7204 GET_MODE_NAME (mode),
7205 INSN_UID (insn), (unsigned long) address,
7206 -1 * (long)fix->backwards, (long)fix->forwards);
7207 arm_print_value (dump_file, fix->value);
7208 fprintf (dump_file, "\n");
7211 /* Add it to the chain of fixes. */
7212 fix->next = NULL;
7214 if (minipool_fix_head != NULL)
7215 minipool_fix_tail->next = fix;
7216 else
7217 minipool_fix_head = fix;
7219 minipool_fix_tail = fix;
7222 /* Scan INSN and note any of its operands that need fixing.
7223 If DO_PUSHES is false we do not actually push any of the fixups
7224 needed. The function returns TRUE if any fixups were needed/pushed.
7225 This is used by arm_memory_load_p() which needs to know about loads
7226 of constants that will be converted into minipool loads. */
7227 static bool
7228 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7230 bool result = false;
7231 int opno;
7233 extract_insn (insn);
7235 if (!constrain_operands (1))
7236 fatal_insn_not_found (insn);
7238 if (recog_data.n_alternatives == 0)
7239 return false;
7241 /* Fill in recog_op_alt with information about the constraints of this insn. */
7242 preprocess_constraints ();
7244 for (opno = 0; opno < recog_data.n_operands; opno++)
7246 /* Things we need to fix can only occur in inputs. */
7247 if (recog_data.operand_type[opno] != OP_IN)
7248 continue;
7250 /* If this alternative is a memory reference, then any mention
7251 of constants in this alternative is really to fool reload
7252 into allowing us to accept one there. We need to fix them up
7253 now so that we output the right code. */
7254 if (recog_op_alt[opno][which_alternative].memory_ok)
7256 rtx op = recog_data.operand[opno];
7258 if (CONSTANT_P (op))
7260 if (do_pushes)
7261 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7262 recog_data.operand_mode[opno], op);
7263 result = true;
7265 else if (GET_CODE (op) == MEM
7266 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7267 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7269 if (do_pushes)
7271 rtx cop = avoid_constant_pool_reference (op);
7273 /* Casting the address of something to a mode narrower
7274 than a word can cause avoid_constant_pool_reference()
7275 to return the pool reference itself. That's no good to
7276 us here. Let's just hope that we can use the
7277 constant pool value directly. */
7278 if (op == cop)
7279 cop = get_pool_constant (XEXP (op, 0));
7281 push_minipool_fix (insn, address,
7282 recog_data.operand_loc[opno],
7283 recog_data.operand_mode[opno], cop);
7286 result = true;
7291 return result;
7294 /* GCC puts the pool in the wrong place for ARM, since we can only
7295 load addresses a limited distance around the pc. We do some
7296 special munging to move the constant pool values to the correct
7297 point in the code. */
7298 static void
7299 arm_reorg (void)
7301 rtx insn;
7302 HOST_WIDE_INT address = 0;
7303 Mfix * fix;
7305 minipool_fix_head = minipool_fix_tail = NULL;
7307 /* The first insn must always be a note, or the code below won't
7308 scan it properly. */
7309 insn = get_insns ();
7310 if (GET_CODE (insn) != NOTE)
7311 abort ();
7313 /* Scan all the insns and record the operands that will need fixing. */
7314 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7316 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7317 && (arm_cirrus_insn_p (insn)
7318 || GET_CODE (insn) == JUMP_INSN
7319 || arm_memory_load_p (insn)))
7320 cirrus_reorg (insn);
7322 if (GET_CODE (insn) == BARRIER)
7323 push_minipool_barrier (insn, address);
7324 else if (INSN_P (insn))
7326 rtx table;
7328 note_invalid_constants (insn, address, true);
7329 address += get_attr_length (insn);
7331 /* If the insn is a vector jump, add the size of the table
7332 and skip the table. */
7333 if ((table = is_jump_table (insn)) != NULL)
7335 address += get_jump_table_size (table);
7336 insn = table;
7341 fix = minipool_fix_head;
7343 /* Now scan the fixups and perform the required changes. */
7344 while (fix)
7346 Mfix * ftmp;
7347 Mfix * fdel;
7348 Mfix * last_added_fix;
7349 Mfix * last_barrier = NULL;
7350 Mfix * this_fix;
7352 /* Skip any further barriers before the next fix. */
7353 while (fix && GET_CODE (fix->insn) == BARRIER)
7354 fix = fix->next;
7356 /* No more fixes. */
7357 if (fix == NULL)
7358 break;
7360 last_added_fix = NULL;
7362 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7364 if (GET_CODE (ftmp->insn) == BARRIER)
7366 if (ftmp->address >= minipool_vector_head->max_address)
7367 break;
7369 last_barrier = ftmp;
7371 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7372 break;
7374 last_added_fix = ftmp; /* Keep track of the last fix added. */
7377 /* If we found a barrier, drop back to that; any fixes that we
7378 could have reached but come after the barrier will now go in
7379 the next mini-pool. */
7380 if (last_barrier != NULL)
7382 /* Reduce the refcount for those fixes that won't go into this
7383 pool after all. */
7384 for (fdel = last_barrier->next;
7385 fdel && fdel != ftmp;
7386 fdel = fdel->next)
7388 fdel->minipool->refcount--;
7389 fdel->minipool = NULL;
7392 ftmp = last_barrier;
7394 else
7396 /* ftmp is first fix that we can't fit into this pool and
7397 there are no natural barriers that we could use. Insert a
7398 new barrier in the code somewhere between the previous
7399 fix and this one, and arrange to jump around it. */
7400 HOST_WIDE_INT max_address;
7402 /* The last item on the list of fixes must be a barrier, so
7403 we can never run off the end of the list of fixes without
7404 last_barrier being set. */
7405 if (ftmp == NULL)
7406 abort ();
7408 max_address = minipool_vector_head->max_address;
7409 /* Check that there isn't another fix that is in range that
7410 we couldn't fit into this pool because the pool was
7411 already too large: we need to put the pool before such an
7412 instruction. */
7413 if (ftmp->address < max_address)
7414 max_address = ftmp->address;
7416 last_barrier = create_fix_barrier (last_added_fix, max_address);
7419 assign_minipool_offsets (last_barrier);
7421 while (ftmp)
7423 if (GET_CODE (ftmp->insn) != BARRIER
7424 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7425 == NULL))
7426 break;
7428 ftmp = ftmp->next;
7431 /* Scan over the fixes we have identified for this pool, fixing them
7432 up and adding the constants to the pool itself. */
7433 for (this_fix = fix; this_fix && ftmp != this_fix;
7434 this_fix = this_fix->next)
7435 if (GET_CODE (this_fix->insn) != BARRIER)
7437 rtx addr
7438 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7439 minipool_vector_label),
7440 this_fix->minipool->offset);
7441 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7444 dump_minipool (last_barrier->insn);
7445 fix = ftmp;
7448 /* From now on we must synthesize any constants that we can't handle
7449 directly. This can happen if the RTL gets split during final
7450 instruction generation. */
7451 after_arm_reorg = 1;
7453 /* Free the minipool memory. */
7454 obstack_free (&minipool_obstack, minipool_startobj);
7457 /* Routines to output assembly language. */
7459 /* If the rtx is the correct value then return the string of the number.
7460 In this way we can ensure that valid double constants are generated even
7461 when cross compiling. */
7462 const char *
7463 fp_immediate_constant (rtx x)
7465 REAL_VALUE_TYPE r;
7466 int i;
7468 if (!fp_consts_inited)
7469 init_fp_table ();
7471 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7472 for (i = 0; i < 8; i++)
7473 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7474 return strings_fp[i];
7476 abort ();
7479 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7480 static const char *
7481 fp_const_from_val (REAL_VALUE_TYPE *r)
7483 int i;
7485 if (!fp_consts_inited)
7486 init_fp_table ();
7488 for (i = 0; i < 8; i++)
7489 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7490 return strings_fp[i];
7492 abort ();
7495 /* Output the operands of a LDM/STM instruction to STREAM.
7496 MASK is the ARM register set mask of which only bits 0-15 are important.
7497 REG is the base register, either the frame pointer or the stack pointer,
7498 INSTR is the possibly suffixed load or store instruction. */
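/* For instance (mask value assumed purely for illustration): with
   INSTR "ldmfd\t%r!", REG 13 and MASK 0x4030 this would print
   "ldmfd sp!, {r4, r5, lr}". */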
7499 static void
7500 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7502 int i;
7503 int not_first = FALSE;
7505 fputc ('\t', stream);
7506 asm_fprintf (stream, instr, reg);
7507 fputs (", {", stream);
7509 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7510 if (mask & (1 << i))
7512 if (not_first)
7513 fprintf (stream, ", ");
7515 asm_fprintf (stream, "%r", i);
7516 not_first = TRUE;
7519 fprintf (stream, "}\n");
7523 /* Output a FLDMX instruction to STREAM.
7524 BASE is the register containing the address.
7525 REG and COUNT specify the register range.
7526 Extra registers may be added to avoid hardware bugs. */
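/* Example output (assuming the workaround below does not trigger):
   BASE = 13, REG = 8, COUNT = 3 emits "fldmfdx sp!, {d8, d9, d10}". */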
7528 static void
7529 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7531 int i;
7533 /* Work around the ARM10 VFPr1 bug. */
7534 if (count == 2 && !arm_arch6)
7536 if (reg == 15)
7537 reg--;
7538 count++;
7541 fputc ('\t', stream);
7542 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7544 for (i = reg; i < reg + count; i++)
7546 if (i > reg)
7547 fputs (", ", stream);
7548 asm_fprintf (stream, "d%d", i);
7550 fputs ("}\n", stream);
7555 /* Output the assembly for a store multiple. */
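/* A sketch of the result: for a push of the three register pairs
   starting at d8, the pattern built below would print as
   "fstmfdx sp!, {d8, d9, d10}". */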
7557 const char *
7558 vfp_output_fstmx (rtx * operands)
7560 char pattern[100];
7561 int p;
7562 int base;
7563 int i;
7565 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7566 p = strlen (pattern);
7568 if (GET_CODE (operands[1]) != REG)
7569 abort ();
7571 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7572 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7574 p += sprintf (&pattern[p], ", d%d", base + i);
7576 strcpy (&pattern[p], "}");
7578 output_asm_insn (pattern, operands);
7579 return "";
7583 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7584 number of bytes pushed. */
7586 static int
7587 vfp_emit_fstmx (int base_reg, int count)
7589 rtx par;
7590 rtx dwarf;
7591 rtx tmp, reg;
7592 int i;
7594 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7595 register pairs are stored by a store multiple insn. We avoid this
7596 by pushing an extra pair. */
7597 if (count == 2 && !arm_arch6)
7599 if (base_reg == LAST_VFP_REGNUM - 3)
7600 base_reg -= 2;
7601 count++;
7604 /* ??? The frame layout is implementation defined. We describe
7605 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7606 We really need some way of representing the whole block so that the
7607 unwinder can figure it out at runtime. */
7608 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7609 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7611 reg = gen_rtx_REG (DFmode, base_reg);
7612 base_reg += 2;
7614 XVECEXP (par, 0, 0)
7615 = gen_rtx_SET (VOIDmode,
7616 gen_rtx_MEM (BLKmode,
7617 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7618 gen_rtx_UNSPEC (BLKmode,
7619 gen_rtvec (1, reg),
7620 UNSPEC_PUSH_MULT));
7622 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7623 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7624 GEN_INT (-(count * 8 + 4))));
7625 RTX_FRAME_RELATED_P (tmp) = 1;
7626 XVECEXP (dwarf, 0, 0) = tmp;
7628 tmp = gen_rtx_SET (VOIDmode,
7629 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7630 reg);
7631 RTX_FRAME_RELATED_P (tmp) = 1;
7632 XVECEXP (dwarf, 0, 1) = tmp;
7634 for (i = 1; i < count; i++)
7636 reg = gen_rtx_REG (DFmode, base_reg);
7637 base_reg += 2;
7638 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7640 tmp = gen_rtx_SET (VOIDmode,
7641 gen_rtx_MEM (DFmode,
7642 gen_rtx_PLUS (SImode,
7643 stack_pointer_rtx,
7644 GEN_INT (i * 8))),
7645 reg);
7646 RTX_FRAME_RELATED_P (tmp) = 1;
7647 XVECEXP (dwarf, 0, i + 1) = tmp;
7650 par = emit_insn (par);
7651 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7652 REG_NOTES (par));
7653 RTX_FRAME_RELATED_P (par) = 1;
7655 return count * 8 + 4;
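/* Each register pair takes 8 bytes and the FSTMX format appends one
   pad word, hence the "+ 4": pushing three pairs, for instance,
   adjusts the stack by 3 * 8 + 4 = 28 bytes. */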
7659 /* Output a 'call' insn. */
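/* A note on the sequences below: reading the pc on ARM yields the
   address of the current instruction plus 8, so "mov lr, pc"
   immediately before the jump leaves lr pointing at the instruction
   after the jump -- exactly the return address we need. */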
7660 const char *
7661 output_call (rtx *operands)
7663 if (arm_arch5)
7664 abort (); /* Patterns should call blx <reg> directly. */
7666 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7667 if (REGNO (operands[0]) == LR_REGNUM)
7669 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7670 output_asm_insn ("mov%?\t%0, %|lr", operands);
7673 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7675 if (TARGET_INTERWORK || arm_arch4t)
7676 output_asm_insn ("bx%?\t%0", operands);
7677 else
7678 output_asm_insn ("mov%?\t%|pc, %0", operands);
7680 return "";
7683 /* Output a 'call' insn that is a reference in memory. */
7684 const char *
7685 output_call_mem (rtx *operands)
7687 if (TARGET_INTERWORK && !arm_arch5)
7689 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7690 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7691 output_asm_insn ("bx%?\t%|ip", operands);
7693 else if (regno_use_in (LR_REGNUM, operands[0]))
7695 /* LR is used in the memory address. We load the address in the
7696 first instruction. It's safe to use IP as the target of the
7697 load since the call will kill it anyway. */
7698 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7699 if (arm_arch5)
7700 output_asm_insn ("blx%?\t%|ip", operands);
7701 else
7703 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7704 if (arm_arch4t)
7705 output_asm_insn ("bx%?\t%|ip", operands);
7706 else
7707 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7710 else
7712 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7713 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7716 return "";
7720 /* Output a move from arm registers to an fpa register.
7721 OPERANDS[0] is an fpa register.
7722 OPERANDS[1] is the first register of an arm register pair. */
7723 const char *
7724 output_mov_long_double_fpa_from_arm (rtx *operands)
7726 int arm_reg0 = REGNO (operands[1]);
7727 rtx ops[3];
7729 if (arm_reg0 == IP_REGNUM)
7730 abort ();
7732 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7733 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7734 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7736 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7737 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7739 return "";
7742 /* Output a move from an fpa register to arm registers.
7743 OPERANDS[0] is the first register of an arm register pair.
7744 OPERANDS[1] is an fpa register. */
7745 const char *
7746 output_mov_long_double_arm_from_fpa (rtx *operands)
7748 int arm_reg0 = REGNO (operands[0]);
7749 rtx ops[3];
7751 if (arm_reg0 == IP_REGNUM)
7752 abort ();
7754 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7755 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7756 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7758 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7759 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7760 return "";
7763 /* Output a move from arm registers to arm registers of a long double.
7764 OPERANDS[0] is the destination.
7765 OPERANDS[1] is the source. */
7766 const char *
7767 output_mov_long_double_arm_from_arm (rtx *operands)
7769 /* We have to be careful here because the two might overlap. */
7770 int dest_start = REGNO (operands[0]);
7771 int src_start = REGNO (operands[1]);
7772 rtx ops[2];
7773 int i;
7775 if (dest_start < src_start)
7777 for (i = 0; i < 3; i++)
7779 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7780 ops[1] = gen_rtx_REG (SImode, src_start + i);
7781 output_asm_insn ("mov%?\t%0, %1", ops);
7784 else
7786 for (i = 2; i >= 0; i--)
7788 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7789 ops[1] = gen_rtx_REG (SImode, src_start + i);
7790 output_asm_insn ("mov%?\t%0, %1", ops);
7794 return "";
7798 /* Output a move from arm registers to an fpa register.
7799 OPERANDS[0] is an fpa register.
7800 OPERANDS[1] is the first register of an arm register pair. */
7801 const char *
7802 output_mov_double_fpa_from_arm (rtx *operands)
7804 int arm_reg0 = REGNO (operands[1]);
7805 rtx ops[2];
7807 if (arm_reg0 == IP_REGNUM)
7808 abort ();
7810 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7811 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7812 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7813 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7814 return "";
7817 /* Output a move from an fpa register to arm registers.
7818 OPERANDS[0] is the first register of an arm register pair.
7819 OPERANDS[1] is an fpa register. */
7820 const char *
7821 output_mov_double_arm_from_fpa (rtx *operands)
7823 int arm_reg0 = REGNO (operands[0]);
7824 rtx ops[2];
7826 if (arm_reg0 == IP_REGNUM)
7827 abort ();
7829 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7830 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7831 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
7832 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
7833 return "";
7836 /* Output a move between double words.
7837 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
7838 or MEM<-REG and all MEMs must be offsettable addresses. */
7839 const char *
7840 output_move_double (rtx *operands)
7842 enum rtx_code code0 = GET_CODE (operands[0]);
7843 enum rtx_code code1 = GET_CODE (operands[1]);
7844 rtx otherops[3];
7846 if (code0 == REG)
7848 int reg0 = REGNO (operands[0]);
7850 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
7852 if (code1 == REG)
7854 int reg1 = REGNO (operands[1]);
7855 if (reg1 == IP_REGNUM)
7856 abort ();
7858 /* Ensure the second source is not overwritten. */
7859 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
7860 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
7861 else
7862 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
7864 else if (code1 == CONST_VECTOR)
7866 HOST_WIDE_INT hint = 0;
7868 switch (GET_MODE (operands[1]))
7870 case V2SImode:
7871 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
7872 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
7873 break;
7875 case V4HImode:
7876 if (BYTES_BIG_ENDIAN)
7878 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7879 hint <<= 16;
7880 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7882 else
7884 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7885 hint <<= 16;
7886 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7889 otherops[1] = GEN_INT (hint);
7890 hint = 0;
7892 if (BYTES_BIG_ENDIAN)
7894 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7895 hint <<= 16;
7896 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7898 else
7900 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7901 hint <<= 16;
7902 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7905 operands[1] = GEN_INT (hint);
7906 break;
7908 case V8QImode:
7909 if (BYTES_BIG_ENDIAN)
7911 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7912 hint <<= 8;
7913 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7914 hint <<= 8;
7915 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7916 hint <<= 8;
7917 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7919 else
7921 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7922 hint <<= 8;
7923 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7924 hint <<= 8;
7925 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7926 hint <<= 8;
7927 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7930 otherops[1] = GEN_INT (hint);
7931 hint = 0;
7933 if (BYTES_BIG_ENDIAN)
7935 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7936 hint <<= 8;
7937 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7938 hint <<= 8;
7939 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7940 hint <<= 8;
7941 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7943 else
7945 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7946 hint <<= 8;
7947 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7948 hint <<= 8;
7949 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7950 hint <<= 8;
7951 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7954 operands[1] = GEN_INT (hint);
7955 break;
7957 default:
7958 abort ();
7960 output_mov_immediate (operands);
7961 output_mov_immediate (otherops);
7963 else if (code1 == CONST_DOUBLE)
7965 if (GET_MODE (operands[1]) == DFmode)
7967 REAL_VALUE_TYPE r;
7968 long l[2];
7970 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
7971 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
7972 otherops[1] = GEN_INT (l[1]);
7973 operands[1] = GEN_INT (l[0]);
7975 else if (GET_MODE (operands[1]) != VOIDmode)
7976 abort ();
7977 else if (WORDS_BIG_ENDIAN)
7979 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7980 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7982 else
7984 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7985 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7988 output_mov_immediate (operands);
7989 output_mov_immediate (otherops);
7991 else if (code1 == CONST_INT)
7993 #if HOST_BITS_PER_WIDE_INT > 32
7994 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
7995 what the upper word is. */
7996 if (WORDS_BIG_ENDIAN)
7998 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7999 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8001 else
8003 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8004 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8006 #else
8007 /* Sign extend the intval into the high-order word. */
8008 if (WORDS_BIG_ENDIAN)
8010 otherops[1] = operands[1];
8011 operands[1] = (INTVAL (operands[1]) < 0
8012 ? constm1_rtx : const0_rtx);
8014 else
8015 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8016 #endif
8017 output_mov_immediate (otherops);
8018 output_mov_immediate (operands);
8020 else if (code1 == MEM)
8022 switch (GET_CODE (XEXP (operands[1], 0)))
8024 case REG:
8025 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8026 break;
8028 case PRE_INC:
8029 if (!TARGET_LDRD)
8030 abort (); /* Should never happen now. */
8031 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8032 break;
8034 case PRE_DEC:
8035 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8036 break;
8038 case POST_INC:
8039 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8040 break;
8042 case POST_DEC:
8043 if (!TARGET_LDRD)
8044 abort (); /* Should never happen now. */
8045 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8046 break;
8048 case PRE_MODIFY:
8049 case POST_MODIFY:
8050 otherops[0] = operands[0];
8051 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8052 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8054 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8056 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8058 /* Registers overlap so split out the increment. */
8059 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8060 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8062 else
8063 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8065 else
8067 /* We only allow constant increments, so this is safe. */
8068 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8070 break;
8072 case LABEL_REF:
8073 case CONST:
8074 output_asm_insn ("adr%?\t%0, %1", operands);
8075 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8076 break;
8078 default:
8079 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8080 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8082 otherops[0] = operands[0];
8083 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8084 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8086 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8088 if (GET_CODE (otherops[2]) == CONST_INT)
8090 switch ((int) INTVAL (otherops[2]))
8092 case -8:
8093 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8094 return "";
8095 case -4:
8096 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8097 return "";
8098 case 4:
8099 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8100 return "";
8103 if (TARGET_LDRD
8104 && (GET_CODE (otherops[2]) == REG
8105 || (GET_CODE (otherops[2]) == CONST_INT
8106 && INTVAL (otherops[2]) > -256
8107 && INTVAL (otherops[2]) < 256)))
8109 if (reg_overlap_mentioned_p (otherops[0],
8110 otherops[2]))
8112 /* Swap base and index registers over to
8113 avoid a conflict. */
8114 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8115 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8118 /* If both registers conflict, it will usually
8119 have been fixed by a splitter. */
8120 if (reg_overlap_mentioned_p (otherops[0],
8121 otherops[2]))
8123 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8124 output_asm_insn ("ldr%?d\t%0, [%1]",
8125 otherops);
8126 return "";
8128 else
8130 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8131 otherops);
8132 return "";
8135 if (GET_CODE (otherops[2]) == CONST_INT)
8137 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8138 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8139 else
8140 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8142 else
8143 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8145 else
8146 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8148 return "ldm%?ia\t%0, %M0";
8150 else
8152 otherops[1] = adjust_address (operands[1], SImode, 4);
8153 /* Take care of overlapping base/data reg. */
8154 if (reg_mentioned_p (operands[0], operands[1]))
8156 output_asm_insn ("ldr%?\t%0, %1", otherops);
8157 output_asm_insn ("ldr%?\t%0, %1", operands);
8159 else
8161 output_asm_insn ("ldr%?\t%0, %1", operands);
8162 output_asm_insn ("ldr%?\t%0, %1", otherops);
8167 else
8168 abort (); /* Constraints should prevent this. */
8170 else if (code0 == MEM && code1 == REG)
8172 if (REGNO (operands[1]) == IP_REGNUM)
8173 abort ();
8175 switch (GET_CODE (XEXP (operands[0], 0)))
8177 case REG:
8178 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8179 break;
8181 case PRE_INC:
8182 if (!TARGET_LDRD)
8183 abort (); /* Should never happen now. */
8184 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8185 break;
8187 case PRE_DEC:
8188 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8189 break;
8191 case POST_INC:
8192 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8193 break;
8195 case POST_DEC:
8196 if (!TARGET_LDRD)
8197 abort (); /* Should never happen now. */
8198 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8199 break;
8201 case PRE_MODIFY:
8202 case POST_MODIFY:
8203 otherops[0] = operands[1];
8204 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8205 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8207 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8208 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8209 else
8210 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8211 break;
8213 case PLUS:
8214 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8215 if (GET_CODE (otherops[2]) == CONST_INT)
8217 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8219 case -8:
8220 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8221 return "";
8223 case -4:
8224 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8225 return "";
8227 case 4:
8228 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8229 return "";
8232 if (TARGET_LDRD
8233 && (GET_CODE (otherops[2]) == REG
8234 || (GET_CODE (otherops[2]) == CONST_INT
8235 && INTVAL (otherops[2]) > -256
8236 && INTVAL (otherops[2]) < 256)))
8238 otherops[0] = operands[1];
8239 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8240 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8241 return "";
8243 /* Fall through */
8245 default:
8246 otherops[0] = adjust_address (operands[0], SImode, 4);
8247 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8248 output_asm_insn ("str%?\t%1, %0", operands);
8249 output_asm_insn ("str%?\t%1, %0", otherops);
8252 else
8253 /* Constraints should prevent this. */
8254 abort ();
8256 return "";
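/* As a worked illustration (register names r0-r2 assumed purely for
   the example): loading a DImode value from memory addressed by r2
   into the register pair r0/r1 takes the REG case above and emits

       ldmia   r2, {r0, r1}

   while storing the pair back through a simple register address
   emits the matching "stmia r2, {r0, r1}".  */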
8260 /* Output an arbitrary MOV reg, #n.
8261 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8262 const char *
8263 output_mov_immediate (rtx *operands)
8265 HOST_WIDE_INT n = INTVAL (operands[1]);
8267 /* Try to use one MOV. */
8268 if (const_ok_for_arm (n))
8269 output_asm_insn ("mov%?\t%0, %1", operands);
8271 /* Try to use one MVN. */
8272 else if (const_ok_for_arm (~n))
8274 operands[1] = GEN_INT (~n);
8275 output_asm_insn ("mvn%?\t%0, %1", operands);
8277 else
8279 int n_ones = 0;
8280 int i;
8282 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8283 for (i = 0; i < 32; i++)
8284 if (n & 1 << i)
8285 n_ones++;
8287 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8288 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8289 else
8290 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8293 return "";
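/* For example (destination register assumed to be r0 for
   illustration): the constant 0xffff is not a valid rotated 8-bit
   immediate, and neither is its complement, but it contains only
   sixteen set bits, so the MOV/ORR path above would emit

       mov     r0, #255
       orr     r0, r0, #65280

   building the value from two byte-sized chunks.  */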
8296 /* Output an ADD r, s, #n where n may be too big for one instruction.
8297 If adding zero and the destination is the same as the source register, output nothing. */
8298 const char *
8299 output_add_immediate (rtx *operands)
8301 HOST_WIDE_INT n = INTVAL (operands[2]);
8303 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8305 if (n < 0)
8306 output_multi_immediate (operands,
8307 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8308 -n);
8309 else
8310 output_multi_immediate (operands,
8311 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8312 n);
8315 return "";
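/* A hypothetical example: with operands[0] = r0, operands[1] = r1
   and a constant of 0x1004 (not a valid single ARM immediate), the
   code above would emit

       add     r0, r1, #4
       add     r0, r0, #4096

   via output_multi_immediate, which peels off one 8-bit chunk at a
   time.  */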
8318 /* Output a multiple immediate operation.
8319 OPERANDS is the vector of operands referred to in the output patterns.
8320 INSTR1 is the output pattern to use for the first constant.
8321 INSTR2 is the output pattern to use for subsequent constants.
8322 IMMED_OP is the index of the constant slot in OPERANDS.
8323 N is the constant value. */
8324 static const char *
8325 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8326 int immed_op, HOST_WIDE_INT n)
8328 #if HOST_BITS_PER_WIDE_INT > 32
8329 n &= 0xffffffff;
8330 #endif
8332 if (n == 0)
8334 /* Quick and easy output. */
8335 operands[immed_op] = const0_rtx;
8336 output_asm_insn (instr1, operands);
8338 else
8340 int i;
8341 const char * instr = instr1;
8343 /* Note that n is never zero here (which would give no output). */
8344 for (i = 0; i < 32; i += 2)
8346 if (n & (3 << i))
8348 operands[immed_op] = GEN_INT (n & (255 << i));
8349 output_asm_insn (instr, operands);
8350 instr = instr2;
8351 i += 6;
8356 return "";
8359 /* Return the appropriate ARM instruction for the operation code.
8360 The returned result should not be overwritten. OP is the rtx of the
8361 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8362 was shifted. */
8363 const char *
8364 arithmetic_instr (rtx op, int shift_first_arg)
8366 switch (GET_CODE (op))
8368 case PLUS:
8369 return "add";
8371 case MINUS:
8372 return shift_first_arg ? "rsb" : "sub";
8374 case IOR:
8375 return "orr";
8377 case XOR:
8378 return "eor";
8380 case AND:
8381 return "and";
8383 default:
8384 abort ();
8388 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8389 for the operation code. The returned result should not be overwritten.
8390 OP is the rtx code of the shift.
8391 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8392 constant amount of the shift otherwise. */
8393 static const char *
8394 shift_op (rtx op, HOST_WIDE_INT *amountp)
8396 const char * mnem;
8397 enum rtx_code code = GET_CODE (op);
8399 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8400 *amountp = -1;
8401 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8402 *amountp = INTVAL (XEXP (op, 1));
8403 else
8404 abort ();
8406 switch (code)
8408 case ASHIFT:
8409 mnem = "asl";
8410 break;
8412 case ASHIFTRT:
8413 mnem = "asr";
8414 break;
8416 case LSHIFTRT:
8417 mnem = "lsr";
8418 break;
8420 case ROTATE:
8421 if (*amountp == -1)
8422 abort ();
8423 *amountp = 32 - *amountp;
8425 /* Fall through. */
8427 case ROTATERT:
8428 mnem = "ror";
8429 break;
8431 case MULT:
8432 /* We never have to worry about the amount being other than a
8433 power of 2, since this case can never be reloaded from a reg. */
8434 if (*amountp != -1)
8435 *amountp = int_log2 (*amountp);
8436 else
8437 abort ();
8438 return "asl";
8440 default:
8441 abort ();
8444 if (*amountp != -1)
8446 /* This is not 100% correct, but follows from the desire to merge
8447 multiplication by a power of 2 with the recognizer for a
8448 shift. >=32 is not a valid shift for "asl", so we must try and
8449 output a shift that produces the correct arithmetical result.
8450 Using lsr #32 is identical except for the fact that the carry bit
8451 is not set correctly if we set the flags; but we never use the
8452 carry bit from such an operation, so we can ignore that. */
8453 if (code == ROTATERT)
8454 /* Rotate is just modulo 32. */
8455 *amountp &= 31;
8456 else if (*amountp != (*amountp & 31))
8458 if (code == ASHIFT)
8459 mnem = "lsr";
8460 *amountp = 32;
8463 /* Shifts of 0 are no-ops. */
8464 if (*amountp == 0)
8465 return NULL;
8468 return mnem;
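/* For instance, the RTL (mult:SI (reg) (const_int 8)) reaches the
   MULT case above: 8 is converted to a shift count of 3 by int_log2
   and "asl" is returned, so the insn printer can emit an operand
   such as "r1, asl #3".  This sketch assumes the usual case where
   the multiplier is a constant power of two.  */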
8471 /* Obtain the shift count corresponding to the power of two POWER. */
8473 static HOST_WIDE_INT
8474 int_log2 (HOST_WIDE_INT power)
8476 HOST_WIDE_INT shift = 0;
8478 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8480 if (shift > 31)
8481 abort ();
8482 shift++;
8485 return shift;
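/* For example int_log2 (1) == 0 and int_log2 (8) == 3.  Note from
   the loop above that a value with no bit set in the low 32 bits
   (such as zero) trips the abort; callers are expected to pass a
   power of two.  */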
8488 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8489 /bin/as is horribly restrictive. */
8490 #define MAX_ASCII_LEN 51
8492 void
8493 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8495 int i;
8496 int len_so_far = 0;
8498 fputs ("\t.ascii\t\"", stream);
8500 for (i = 0; i < len; i++)
8502 int c = p[i];
8504 if (len_so_far >= MAX_ASCII_LEN)
8506 fputs ("\"\n\t.ascii\t\"", stream);
8507 len_so_far = 0;
8510 switch (c)
8512 case TARGET_TAB:
8513 fputs ("\\t", stream);
8514 len_so_far += 2;
8515 break;
8517 case TARGET_FF:
8518 fputs ("\\f", stream);
8519 len_so_far += 2;
8520 break;
8522 case TARGET_BS:
8523 fputs ("\\b", stream);
8524 len_so_far += 2;
8525 break;
8527 case TARGET_CR:
8528 fputs ("\\r", stream);
8529 len_so_far += 2;
8530 break;
8532 case TARGET_NEWLINE:
8533 fputs ("\\n", stream);
8534 c = p [i + 1];
8535 if ((c >= ' ' && c <= '~')
8536 || c == TARGET_TAB)
8537 /* This is a good place for a line break. */
8538 len_so_far = MAX_ASCII_LEN;
8539 else
8540 len_so_far += 2;
8541 break;
8543 case '\"':
8544 case '\\':
8545 putc ('\\', stream);
8546 len_so_far++;
8547 /* Drop through. */
8549 default:
8550 if (c >= ' ' && c <= '~')
8552 putc (c, stream);
8553 len_so_far++;
8555 else
8557 fprintf (stream, "\\%03o", c);
8558 len_so_far += 4;
8560 break;
8564 fputs ("\"\n", stream);
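/* As an illustration, a call such as

       output_ascii_pseudo_op (stream, (const unsigned char *) "a\"b", 3);

   would print

       .ascii  "a\"b"

   with the quote escaped, and a string longer than MAX_ASCII_LEN
   characters would be split across several .ascii directives.  */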
8567 /* Compute the register save mask for registers 0 through 12
8568 inclusive. This code is used by arm_compute_save_reg_mask. */
8569 static unsigned long
8570 arm_compute_save_reg0_reg12_mask (void)
8572 unsigned long func_type = arm_current_func_type ();
8573 unsigned int save_reg_mask = 0;
8574 unsigned int reg;
8576 if (IS_INTERRUPT (func_type))
8578 unsigned int max_reg;
8579 /* Interrupt functions must not corrupt any registers,
8580 even call clobbered ones. If this is a leaf function
8581 we can just examine the registers used by the RTL, but
8582 otherwise we have to assume that whatever function is
8583 called might clobber anything, and so we have to save
8584 all the call-clobbered registers as well. */
8585 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8586 /* FIQ handlers have registers r8 - r12 banked, so
8587 we only need to check r0 - r7.  Normal ISRs only
8588 bank r14 and r15, so we must check up to r12.
8589 r13 is the stack pointer which is always preserved,
8590 so we do not need to consider it here. */
8591 max_reg = 7;
8592 else
8593 max_reg = 12;
8595 for (reg = 0; reg <= max_reg; reg++)
8596 if (regs_ever_live[reg]
8597 || (! current_function_is_leaf && call_used_regs [reg]))
8598 save_reg_mask |= (1 << reg);
8600 else
8602 /* In the normal case we only need to save those registers
8603 which are call saved and which are used by this function. */
8604 for (reg = 0; reg <= 10; reg++)
8605 if (regs_ever_live[reg] && ! call_used_regs [reg])
8606 save_reg_mask |= (1 << reg);
8608 /* Handle the frame pointer as a special case. */
8609 if (! TARGET_APCS_FRAME
8610 && ! frame_pointer_needed
8611 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8612 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8613 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8615 /* If we aren't loading the PIC register,
8616 don't stack it even though it may be live. */
8617 if (flag_pic
8618 && ! TARGET_SINGLE_PIC_BASE
8619 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8620 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8623 /* Save registers so the exception handler can modify them. */
8624 if (current_function_calls_eh_return)
8626 unsigned int i;
8628 for (i = 0; ; i++)
8630 reg = EH_RETURN_DATA_REGNO (i);
8631 if (reg == INVALID_REGNUM)
8632 break;
8633 save_reg_mask |= 1 << reg;
8637 return save_reg_mask;
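/* For example, a normal (non-interrupt) function that uses the
   call-saved registers r4 and r6 would get a mask of
   (1 << 4) | (1 << 6) == 0x50 from the loop above, before the
   special-case handling of the frame pointer and PIC register.  */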
8640 /* Compute a bit mask of which registers need to be
8641 saved on the stack for the current function. */
8643 static unsigned long
8644 arm_compute_save_reg_mask (void)
8646 unsigned int save_reg_mask = 0;
8647 unsigned long func_type = arm_current_func_type ();
8649 if (IS_NAKED (func_type))
8650 /* This should never really happen. */
8651 return 0;
8653 /* If we are creating a stack frame, then we must save the frame pointer,
8654 IP (which will hold the old stack pointer), LR and the PC. */
8655 if (frame_pointer_needed)
8656 save_reg_mask |=
8657 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8658 | (1 << IP_REGNUM)
8659 | (1 << LR_REGNUM)
8660 | (1 << PC_REGNUM);
8662 /* Volatile functions do not return, so there
8663 is no need to save any other registers. */
8664 if (IS_VOLATILE (func_type))
8665 return save_reg_mask;
8667 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8669 /* Decide if we need to save the link register.
8670 Interrupt routines have their own banked link register,
8671 so they never need to save it.
8672 Otherwise if we do not use the link register we do not need to save
8673 it. If we are pushing other registers onto the stack however, we
8674 can save an instruction in the epilogue by pushing the link register
8675 now and then popping it back into the PC. This incurs extra memory
8676 accesses though, so we only do it when optimizing for size, and only
8677 if we know that we will not need a fancy return sequence. */
8678 if (regs_ever_live [LR_REGNUM]
8679 || (save_reg_mask
8680 && optimize_size
8681 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8682 && !current_function_calls_eh_return))
8683 save_reg_mask |= 1 << LR_REGNUM;
8685 if (cfun->machine->lr_save_eliminated)
8686 save_reg_mask &= ~ (1 << LR_REGNUM);
8688 if (TARGET_REALLY_IWMMXT
8689 && ((bit_count (save_reg_mask)
8690 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8692 unsigned int reg;
8694 /* The total number of registers that are going to be pushed
8695 onto the stack is odd. We need to ensure that the stack
8696 is 64-bit aligned before we start to save iWMMXt registers,
8697 and also before we start to create locals. (A local variable
8698 might be a double or long long which we will load/store using
8699 an iWMMXt instruction). Therefore we need to push another
8700 ARM register, so that the stack will be 64-bit aligned. We
8701 try to avoid using the arg registers (r0 - r3) as they might be
8702 used to pass values in a tail call. */
8703 for (reg = 4; reg <= 12; reg++)
8704 if ((save_reg_mask & (1 << reg)) == 0)
8705 break;
8707 if (reg <= 12)
8708 save_reg_mask |= (1 << reg);
8709 else
8711 cfun->machine->sibcall_blocked = 1;
8712 save_reg_mask |= (1 << 3);
8716 return save_reg_mask;
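/* By way of example: if a frame pointer is needed, the mask computed
   above always contains at least fp, ip, lr and pc, i.e.
   (1 << 11) | (1 << 12) | (1 << 14) | (1 << 15) == 0xd800, on top of
   whatever r0-r12 registers the function itself forces out.  */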
8720 /* Compute a bit mask of which registers need to be
8721 saved on the stack for the current function. */
8722 static unsigned long
8723 thumb_compute_save_reg_mask (void)
8725 unsigned long mask;
8726 int reg;
8728 mask = 0;
8729 for (reg = 0; reg < 12; reg ++)
8731 if (regs_ever_live[reg] && !call_used_regs[reg])
8732 mask |= 1 << reg;
8735 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
8736 mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
8737 if (TARGET_SINGLE_PIC_BASE)
8738 mask &= ~(1 << arm_pic_register);
8740 /* lr will also be pushed if any lo regs are pushed. */
8741 if (mask & 0xff || thumb_force_lr_save ())
8742 mask |= (1 << LR_REGNUM);
8744 /* Make sure we have a low work register if we need one. */
8745 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
8746 && ((mask & 0x0f00) || TARGET_BACKTRACE))
8747 mask |= 1 << LAST_LO_REGNUM;
8749 return mask;
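/* A small worked example: a Thumb function using r4 and r7 gets
   mask == 0x90 from the loop; since that touches the lo registers,
   LR is added as well, giving 0x4090 (assuming no PIC register or
   backtrace requirements).  */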
8753 /* Return the number of bytes required to save VFP registers. */
8754 static int
8755 arm_get_vfp_saved_size (void)
8757 unsigned int regno;
8758 int count;
8759 int saved;
8761 saved = 0;
8762 /* Space for saved VFP registers. */
8763 if (TARGET_HARD_FLOAT && TARGET_VFP)
8765 count = 0;
8766 for (regno = FIRST_VFP_REGNUM;
8767 regno < LAST_VFP_REGNUM;
8768 regno += 2)
8770 if ((!regs_ever_live[regno] || call_used_regs[regno])
8771 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8773 if (count > 0)
8775 /* Workaround ARM10 VFPr1 bug. */
8776 if (count == 2 && !arm_arch6)
8777 count++;
8778 saved += count * 8 + 4;
8780 count = 0;
8782 else
8783 count++;
8785 if (count > 0)
8787 if (count == 2 && !arm_arch6)
8788 count++;
8789 saved += count * 8 + 4;
8792 return saved;
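/* For instance, a single run of two live register pairs gives
   count == 2, i.e. 2 * 8 + 4 == 20 bytes on arm_arch6; on earlier
   cores the ARM10 VFPr1 workaround above pads this to
   3 * 8 + 4 == 28 bytes.  */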
8796 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8797 everything bar the final return instruction. */
8798 const char *
8799 output_return_instruction (rtx operand, int really_return, int reverse)
8801 char conditional[10];
8802 char instr[100];
8803 int reg;
8804 unsigned long live_regs_mask;
8805 unsigned long func_type;
8806 arm_stack_offsets *offsets;
8808 func_type = arm_current_func_type ();
8810 if (IS_NAKED (func_type))
8811 return "";
8813 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8815 /* If this function was declared non-returning, and we have
8816 found a tail call, then we have to trust that the called
8817 function won't return. */
8818 if (really_return)
8820 rtx ops[2];
8822 /* Otherwise, trap an attempted return by aborting. */
8823 ops[0] = operand;
8824 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8825 : "abort");
8826 assemble_external_libcall (ops[1]);
8827 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8830 return "";
8833 if (current_function_calls_alloca && !really_return)
8834 abort ();
8836 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8838 return_used_this_function = 1;
8840 live_regs_mask = arm_compute_save_reg_mask ();
8842 if (live_regs_mask)
8844 const char * return_reg;
8846 /* If we do not have any special requirements for function exit
8847 (e.g. interworking, or ISR) then we can load the return address
8848 directly into the PC. Otherwise we must load it into LR. */
8849 if (really_return
8850 && ! TARGET_INTERWORK)
8851 return_reg = reg_names[PC_REGNUM];
8852 else
8853 return_reg = reg_names[LR_REGNUM];
8855 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8857 /* There are three possible reasons for the IP register
8858 being saved. 1) a stack frame was created, in which case
8859 IP contains the old stack pointer, or 2) an ISR routine
8860 corrupted it, or 3) it was saved to align the stack on
8861 iWMMXt. In case 1, restore IP into SP, otherwise just
8862 restore IP. */
8863 if (frame_pointer_needed)
8865 live_regs_mask &= ~ (1 << IP_REGNUM);
8866 live_regs_mask |= (1 << SP_REGNUM);
8868 else
8870 if (! IS_INTERRUPT (func_type)
8871 && ! TARGET_REALLY_IWMMXT)
8872 abort ();
8876 /* On some ARM architectures it is faster to use LDR rather than
8877 LDM to load a single register. On other architectures, the
8878 cost is the same. In 26 bit mode, or for exception handlers,
8879 we have to use LDM to load the PC so that the CPSR is also
8880 restored. */
8881 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8883 if (live_regs_mask == (unsigned int)(1 << reg))
8884 break;
8886 if (reg <= LAST_ARM_REGNUM
8887 && (reg != LR_REGNUM
8888 || ! really_return
8889 || ! IS_INTERRUPT (func_type)))
8891 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8892 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8894 else
8896 char *p;
8897 int first = 1;
8899 /* Generate the load multiple instruction to restore the
8900 registers. Note we can get here, even if
8901 frame_pointer_needed is true, but only if sp already
8902 points to the base of the saved core registers. */
8903 if (live_regs_mask & (1 << SP_REGNUM))
8905 unsigned HOST_WIDE_INT stack_adjust;
8907 offsets = arm_get_frame_offsets ();
8908 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
8909 if (stack_adjust != 0 && stack_adjust != 4)
8910 abort ();
8912 if (stack_adjust && arm_arch5)
8913 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8914 else
8916 /* If we can't use ldmib (SA110 bug), then try to pop r3
8917 instead. */
8918 if (stack_adjust)
8919 live_regs_mask |= 1 << 3;
8920 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8923 else
8924 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8926 p = instr + strlen (instr);
8928 for (reg = 0; reg <= SP_REGNUM; reg++)
8929 if (live_regs_mask & (1 << reg))
8931 int l = strlen (reg_names[reg]);
8933 if (first)
8934 first = 0;
8935 else
8937 memcpy (p, ", ", 2);
8938 p += 2;
8941 memcpy (p, "%|", 2);
8942 memcpy (p + 2, reg_names[reg], l);
8943 p += l + 2;
8946 if (live_regs_mask & (1 << LR_REGNUM))
8948 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8949 /* If returning from an interrupt, restore the CPSR. */
8950 if (IS_INTERRUPT (func_type))
8951 strcat (p, "^");
8953 else
8954 strcpy (p, "}");
8957 output_asm_insn (instr, & operand);
8959 /* See if we need to generate an extra instruction to
8960 perform the actual function return. */
8961 if (really_return
8962 && func_type != ARM_FT_INTERWORKED
8963 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8965 /* The return has already been handled
8966 by loading the LR into the PC. */
8967 really_return = 0;
8971 if (really_return)
8973 switch ((int) ARM_FUNC_TYPE (func_type))
8975 case ARM_FT_ISR:
8976 case ARM_FT_FIQ:
8977 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
8978 break;
8980 case ARM_FT_INTERWORKED:
8981 sprintf (instr, "bx%s\t%%|lr", conditional);
8982 break;
8984 case ARM_FT_EXCEPTION:
8985 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
8986 break;
8988 default:
8989 /* Use bx if it's available. */
8990 if (arm_arch5 || arm_arch4t)
8991 sprintf (instr, "bx%s\t%%|lr", conditional);
8992 else
8993 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
8994 break;
8997 output_asm_insn (instr, & operand);
9000 return "";
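/* As a sketch of typical output: for a normal function that saved
   {r4, lr}, is really returning and needs no interworking, the code
   above prints the single instruction

       ldmfd   sp!, {r4, pc}

   loading the saved return address straight into the PC.  */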
9003 /* Write the function name into the code section, directly preceding
9004 the function prologue.
9006 Code will be output similar to this:
9007 t0
9008 .ascii "arm_poke_function_name", 0
9009 .align
9010 t1
9011 .word 0xff000000 + (t1 - t0)
9012 arm_poke_function_name
9013 mov ip, sp
9014 stmfd sp!, {fp, ip, lr, pc}
9015 sub fp, ip, #4
9017 When performing a stack backtrace, code can inspect the value
9018 of 'pc' stored at 'fp' + 0. If the trace function then looks
9019 at location pc - 12 and the top 8 bits are set, then we know
9020 that there is a function name embedded immediately preceding this
9021 location, whose length is given by ((pc[-3]) & ~0xff000000).
9023 We assume that pc is declared as a pointer to an unsigned long.
9025 It is of no benefit to output the function name if we are assembling
9026 a leaf function. These function types will not contain a stack
9027 backtrace structure, therefore it is not possible to determine the
9028 function name. */
9029 void
9030 arm_poke_function_name (FILE *stream, const char *name)
9032 unsigned long alignlength;
9033 unsigned long length;
9034 rtx x;
9036 length = strlen (name) + 1;
9037 alignlength = ROUND_UP_WORD (length);
9039 ASM_OUTPUT_ASCII (stream, name, length);
9040 ASM_OUTPUT_ALIGN (stream, 2);
9041 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9042 assemble_aligned_integer (UNITS_PER_WORD, x);
9045 /* Place some comments into the assembler stream
9046 describing the current function. */
9047 static void
9048 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9050 unsigned long func_type;
9052 if (!TARGET_ARM)
9054 thumb_output_function_prologue (f, frame_size);
9055 return;
9058 /* Sanity check. */
9059 if (arm_ccfsm_state || arm_target_insn)
9060 abort ();
9062 func_type = arm_current_func_type ();
9064 switch ((int) ARM_FUNC_TYPE (func_type))
9066 default:
9067 case ARM_FT_NORMAL:
9068 break;
9069 case ARM_FT_INTERWORKED:
9070 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9071 break;
9072 case ARM_FT_ISR:
9073 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9074 break;
9075 case ARM_FT_FIQ:
9076 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9077 break;
9078 case ARM_FT_EXCEPTION:
9079 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9080 break;
9083 if (IS_NAKED (func_type))
9084 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9086 if (IS_VOLATILE (func_type))
9087 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9089 if (IS_NESTED (func_type))
9090 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9092 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9093 current_function_args_size,
9094 current_function_pretend_args_size, frame_size);
9096 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9097 frame_pointer_needed,
9098 cfun->machine->uses_anonymous_args);
9100 if (cfun->machine->lr_save_eliminated)
9101 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9103 if (current_function_calls_eh_return)
9104 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9106 #ifdef AOF_ASSEMBLER
9107 if (flag_pic)
9108 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9109 #endif
9111 return_used_this_function = 0;
9114 const char *
9115 arm_output_epilogue (rtx sibling)
9117 int reg;
9118 unsigned long saved_regs_mask;
9119 unsigned long func_type;
9120 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9121 frame that is $fp + 4 for a non-variadic function. */
9122 int floats_offset = 0;
9123 rtx operands[3];
9124 FILE * f = asm_out_file;
9125 unsigned int lrm_count = 0;
9126 int really_return = (sibling == NULL);
9127 int start_reg;
9128 arm_stack_offsets *offsets;
9130 /* If we have already generated the return instruction
9131 then it is futile to generate anything else. */
9132 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9133 return "";
9135 func_type = arm_current_func_type ();
9137 if (IS_NAKED (func_type))
9138 /* Naked functions don't have epilogues. */
9139 return "";
9141 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9143 rtx op;
9145 /* A volatile function should never return. Call abort. */
9146 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9147 assemble_external_libcall (op);
9148 output_asm_insn ("bl\t%a0", &op);
9150 return "";
9153 if (current_function_calls_eh_return
9154 && ! really_return)
9155 /* If we are throwing an exception, then we really must
9156 be doing a return, so we can't tail-call. */
9157 abort ();
9159 offsets = arm_get_frame_offsets ();
9160 saved_regs_mask = arm_compute_save_reg_mask ();
9162 if (TARGET_IWMMXT)
9163 lrm_count = bit_count (saved_regs_mask);
9165 floats_offset = offsets->saved_args;
9166 /* Compute how far away the floats will be. */
9167 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9168 if (saved_regs_mask & (1 << reg))
9169 floats_offset += 4;
9171 if (frame_pointer_needed)
9173 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9174 int vfp_offset = offsets->frame;
9176 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9178 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9179 if (regs_ever_live[reg] && !call_used_regs[reg])
9181 floats_offset += 12;
9182 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9183 reg, FP_REGNUM, floats_offset - vfp_offset);
9186 else
9188 start_reg = LAST_FPA_REGNUM;
9190 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9192 if (regs_ever_live[reg] && !call_used_regs[reg])
9194 floats_offset += 12;
9196 /* We can't unstack more than four registers at once. */
9197 if (start_reg - reg == 3)
9199 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9200 reg, FP_REGNUM, floats_offset - vfp_offset);
9201 start_reg = reg - 1;
9204 else
9206 if (reg != start_reg)
9207 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9208 reg + 1, start_reg - reg,
9209 FP_REGNUM, floats_offset - vfp_offset);
9210 start_reg = reg - 1;
9214 /* Just in case the last register checked also needs unstacking. */
9215 if (reg != start_reg)
9216 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9217 reg + 1, start_reg - reg,
9218 FP_REGNUM, floats_offset - vfp_offset);
9221 if (TARGET_HARD_FLOAT && TARGET_VFP)
9223 int saved_size;
9225 /* The fldmx insn does not have base+offset addressing modes,
9226 so we use IP to hold the address. */
9227 saved_size = arm_get_vfp_saved_size ();
9229 if (saved_size > 0)
9231 floats_offset += saved_size;
9232 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9233 FP_REGNUM, floats_offset - vfp_offset);
9235 start_reg = FIRST_VFP_REGNUM;
9236 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9238 if ((!regs_ever_live[reg] || call_used_regs[reg])
9239 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9241 if (start_reg != reg)
9242 arm_output_fldmx (f, IP_REGNUM,
9243 (start_reg - FIRST_VFP_REGNUM) / 2,
9244 (reg - start_reg) / 2);
9245 start_reg = reg + 2;
9248 if (start_reg != reg)
9249 arm_output_fldmx (f, IP_REGNUM,
9250 (start_reg - FIRST_VFP_REGNUM) / 2,
9251 (reg - start_reg) / 2);
9254 if (TARGET_IWMMXT)
9256 /* The frame pointer is guaranteed to be non-double-word aligned.
9257 This is because it is set to (old_stack_pointer - 4) and the
9258 old_stack_pointer was double word aligned. Thus the offset to
9259 the iWMMXt registers to be loaded must also be non-double-word
9260 sized, so that the resultant address *is* double-word aligned.
9261 We can ignore floats_offset since that was already included in
9262 the live_regs_mask. */
9263 lrm_count += (lrm_count % 2 ? 2 : 1);
9265 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9266 if (regs_ever_live[reg] && !call_used_regs[reg])
9268 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9269 reg, FP_REGNUM, lrm_count * 4);
9270 lrm_count += 2;
9274 /* saved_regs_mask should contain the IP, which at the time of stack
9275 frame generation actually contains the old stack pointer. So a
9276 quick way to unwind the stack is just pop the IP register directly
9277 into the stack pointer. */
9278 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9279 abort ();
9280 saved_regs_mask &= ~ (1 << IP_REGNUM);
9281 saved_regs_mask |= (1 << SP_REGNUM);
9283 /* There are two registers left in saved_regs_mask - LR and PC. We
9284 only need to restore the LR register (the return address), but to
9285 save time we can load it directly into the PC, unless we need a
9286 special function exit sequence, or we are not really returning. */
9287 if (really_return
9288 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9289 && !current_function_calls_eh_return)
9290 /* Delete the LR from the register mask, so that the LR on
9291 the stack is loaded into the PC in the register mask. */
9292 saved_regs_mask &= ~ (1 << LR_REGNUM);
9293 else
9294 saved_regs_mask &= ~ (1 << PC_REGNUM);
9296 /* We must use SP as the base register, because SP is one of the
9297 registers being restored. If an interrupt or page fault
9298 happens in the ldm instruction, the SP might or might not
9299 have been restored. That would be bad, as then SP will no
9300 longer indicate the safe area of stack, and we can get stack
9301 corruption. Using SP as the base register means that it will
9302 be reset correctly to the original value, should an interrupt
9303 occur. If the stack pointer already points at the right
9304 place, then omit the subtraction. */
9305 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9306 || current_function_calls_alloca)
9307 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9308 4 * bit_count (saved_regs_mask));
9309 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9311 if (IS_INTERRUPT (func_type))
9312 /* Interrupt handlers will have pushed the
9313 IP onto the stack, so restore it now. */
9314 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9316 else
9318 /* Restore stack pointer if necessary. */
9319 if (offsets->outgoing_args != offsets->saved_regs)
9321 operands[0] = operands[1] = stack_pointer_rtx;
9322 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9323 output_add_immediate (operands);
9326 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9328 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9329 if (regs_ever_live[reg] && !call_used_regs[reg])
9330 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9331 reg, SP_REGNUM);
9333 else
9335 start_reg = FIRST_FPA_REGNUM;
9337 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9339 if (regs_ever_live[reg] && !call_used_regs[reg])
9341 if (reg - start_reg == 3)
9343 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9344 start_reg, SP_REGNUM);
9345 start_reg = reg + 1;
9348 else
9350 if (reg != start_reg)
9351 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9352 start_reg, reg - start_reg,
9353 SP_REGNUM);
9355 start_reg = reg + 1;
9359 /* Just in case the last register checked also needs unstacking. */
9360 if (reg != start_reg)
9361 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9362 start_reg, reg - start_reg, SP_REGNUM);
9365 if (TARGET_HARD_FLOAT && TARGET_VFP)
9367 start_reg = FIRST_VFP_REGNUM;
9368 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9370 if ((!regs_ever_live[reg] || call_used_regs[reg])
9371 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9373 if (start_reg != reg)
9374 arm_output_fldmx (f, SP_REGNUM,
9375 (start_reg - FIRST_VFP_REGNUM) / 2,
9376 (reg - start_reg) / 2);
9377 start_reg = reg + 2;
9380 if (start_reg != reg)
9381 arm_output_fldmx (f, SP_REGNUM,
9382 (start_reg - FIRST_VFP_REGNUM) / 2,
9383 (reg - start_reg) / 2);
9385 if (TARGET_IWMMXT)
9386 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9387 if (regs_ever_live[reg] && !call_used_regs[reg])
9388 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9390 /* If we can, restore the LR into the PC. */
9391 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9392 && really_return
9393 && current_function_pretend_args_size == 0
9394 && saved_regs_mask & (1 << LR_REGNUM)
9395 && !current_function_calls_eh_return)
9397 saved_regs_mask &= ~ (1 << LR_REGNUM);
9398 saved_regs_mask |= (1 << PC_REGNUM);
9401 /* Load the registers off the stack. If we only have one register
9402 to load use the LDR instruction - it is faster. */
9403 if (saved_regs_mask == (1 << LR_REGNUM))
9405 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9407 else if (saved_regs_mask)
9409 if (saved_regs_mask & (1 << SP_REGNUM))
9410 /* Note - write back to the stack register is not enabled
9411 (i.e. "ldmfd sp!..."). We know that the stack pointer is
9412 in the list of registers and if we add writeback the
9413 instruction becomes UNPREDICTABLE. */
9414 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9415 else
9416 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9419 if (current_function_pretend_args_size)
9421 /* Unwind the pre-pushed regs. */
9422 operands[0] = operands[1] = stack_pointer_rtx;
9423 operands[2] = GEN_INT (current_function_pretend_args_size);
9424 output_add_immediate (operands);
9428 /* We may have already restored PC directly from the stack. */
9429 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9430 return "";
9432 /* Stack adjustment for exception handler. */
9433 if (current_function_calls_eh_return)
9434 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9435 ARM_EH_STACKADJ_REGNUM);
9437 /* Generate the return instruction. */
9438 switch ((int) ARM_FUNC_TYPE (func_type))
9440 case ARM_FT_ISR:
9441 case ARM_FT_FIQ:
9442 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9443 break;
9445 case ARM_FT_EXCEPTION:
9446 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9447 break;
9449 case ARM_FT_INTERWORKED:
9450 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9451 break;
9453 default:
9454 if (arm_arch5 || arm_arch4t)
9455 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9456 else
9457 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9458 break;
9461 return "";
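/* A representative frame-pointer epilogue produced by the code above
   (concrete registers shown for illustration only) is

       sub     sp, fp, #12
       ldmfd   sp, {fp, sp, pc}

   where the SUB is emitted only when SP does not already point at
   the register save area, and the LDM deliberately avoids writeback
   for the reasons given in the comments above.  */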
9464 static void
9465 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9466 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9468 arm_stack_offsets *offsets;
9470 if (TARGET_THUMB)
9472 /* ??? Probably not safe to set this here, since it assumes that a
9473 function will be emitted as assembly immediately after we generate
9474 RTL for it. This does not happen for inline functions. */
9475 return_used_this_function = 0;
9477 else
9479 /* We need to take into account any stack-frame rounding. */
9480 offsets = arm_get_frame_offsets ();
9482 if (use_return_insn (FALSE, NULL)
9483 && return_used_this_function
9484 && offsets->saved_regs != offsets->outgoing_args
9485 && !frame_pointer_needed)
9486 abort ();
9488 /* Reset the ARM-specific per-function variables. */
9489 after_arm_reorg = 0;
9493 /* Generate and emit an insn that we will recognize as a push_multi.
9494 Unfortunately, since this insn does not reflect very well the actual
9495 semantics of the operation, we need to annotate the insn for the benefit
9496 of DWARF2 frame unwind information. */
9497 static rtx
9498 emit_multi_reg_push (int mask)
9500 int num_regs = 0;
9501 int num_dwarf_regs;
9502 int i, j;
9503 rtx par;
9504 rtx dwarf;
9505 int dwarf_par_index;
9506 rtx tmp, reg;
9508 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9509 if (mask & (1 << i))
9510 num_regs++;
9512 if (num_regs == 0 || num_regs > 16)
9513 abort ();
9515 /* We don't record the PC in the dwarf frame information. */
9516 num_dwarf_regs = num_regs;
9517 if (mask & (1 << PC_REGNUM))
9518 num_dwarf_regs--;
9520 /* For the body of the insn we are going to generate an UNSPEC in
9521 parallel with several USEs. This allows the insn to be recognized
9522 by the push_multi pattern in the arm.md file. The insn looks
9523 something like this:
9525 (parallel [
9526 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9527 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9528 (use (reg:SI 11 fp))
9529 (use (reg:SI 12 ip))
9530 (use (reg:SI 14 lr))
9531 (use (reg:SI 15 pc))
9534 For the frame note however, we try to be more explicit and actually
9535 show each register being stored into the stack frame, plus a (single)
9536 decrement of the stack pointer. We do it this way in order to be
9537 friendly to the stack unwinding code, which only wants to see a single
9538 stack decrement per instruction. The RTL we generate for the note looks
9539 something like this:
9541 (sequence [
9542 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9543 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9544 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9545 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9546 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9549 This sequence is used both by the code to support stack unwinding for
9550 exceptions handlers and the code to generate dwarf2 frame debugging. */
9552 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9553 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9554 dwarf_par_index = 1;
9556 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9558 if (mask & (1 << i))
9560 reg = gen_rtx_REG (SImode, i);
9562 XVECEXP (par, 0, 0)
9563 = gen_rtx_SET (VOIDmode,
9564 gen_rtx_MEM (BLKmode,
9565 gen_rtx_PRE_DEC (BLKmode,
9566 stack_pointer_rtx)),
9567 gen_rtx_UNSPEC (BLKmode,
9568 gen_rtvec (1, reg),
9569 UNSPEC_PUSH_MULT));
9571 if (i != PC_REGNUM)
9573 tmp = gen_rtx_SET (VOIDmode,
9574 gen_rtx_MEM (SImode, stack_pointer_rtx),
9575 reg);
9576 RTX_FRAME_RELATED_P (tmp) = 1;
9577 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9578 dwarf_par_index++;
9581 break;
9585 for (j = 1, i++; j < num_regs; i++)
9587 if (mask & (1 << i))
9589 reg = gen_rtx_REG (SImode, i);
9591 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9593 if (i != PC_REGNUM)
9595 tmp = gen_rtx_SET (VOIDmode,
9596 gen_rtx_MEM (SImode,
9597 plus_constant (stack_pointer_rtx,
9598 4 * j)),
9599 reg);
9600 RTX_FRAME_RELATED_P (tmp) = 1;
9601 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9604 j++;
9608 par = emit_insn (par);
9610 tmp = gen_rtx_SET (SImode,
9611 stack_pointer_rtx,
9612 gen_rtx_PLUS (SImode,
9613 stack_pointer_rtx,
9614 GEN_INT (-4 * num_regs)));
9615 RTX_FRAME_RELATED_P (tmp) = 1;
9616 XVECEXP (dwarf, 0, 0) = tmp;
9618 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9619 REG_NOTES (par));
9620 return par;
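/* Example usage from prologue code: emit_multi_reg_push ((1 << 4)
   | (1 << LR_REGNUM)) emits RTL that assembles to
   "stmfd sp!, {r4, lr}", with a frame note recording the single
   8-byte stack decrement and both individual stores, as described
   above.  */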
9623 static rtx
9624 emit_sfm (int base_reg, int count)
9626 rtx par;
9627 rtx dwarf;
9628 rtx tmp, reg;
9629 int i;
9631 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9632 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
9634 reg = gen_rtx_REG (XFmode, base_reg++);
9636 XVECEXP (par, 0, 0)
9637 = gen_rtx_SET (VOIDmode,
9638 gen_rtx_MEM (BLKmode,
9639 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9640 gen_rtx_UNSPEC (BLKmode,
9641 gen_rtvec (1, reg),
9642 UNSPEC_PUSH_MULT));
9643 tmp = gen_rtx_SET (VOIDmode,
9644 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
9645 RTX_FRAME_RELATED_P (tmp) = 1;
9646 XVECEXP (dwarf, 0, 1) = tmp;
9648 for (i = 1; i < count; i++)
9650 reg = gen_rtx_REG (XFmode, base_reg++);
9651 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9653 tmp = gen_rtx_SET (VOIDmode,
9654 gen_rtx_MEM (XFmode,
9655 plus_constant (stack_pointer_rtx,
9656 i * 12)),
9657 reg);
9658 RTX_FRAME_RELATED_P (tmp) = 1;
9659 XVECEXP (dwarf, 0, i + 1) = tmp;
9662 tmp = gen_rtx_SET (VOIDmode,
9663 stack_pointer_rtx,
9664 gen_rtx_PLUS (SImode,
9665 stack_pointer_rtx,
9666 GEN_INT (-12 * count)));
9667 RTX_FRAME_RELATED_P (tmp) = 1;
9668 XVECEXP (dwarf, 0, 0) = tmp;
9670 par = emit_insn (par);
9671 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9672 REG_NOTES (par));
9673 return par;
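/* Illustration: emit_sfm (FIRST_FPA_REGNUM, 2) describes the push of
   two XFmode (12-byte) FPA registers, so the attached dwarf note
   records a single 24-byte stack decrement followed by the two
   stores (assuming FIRST_FPA_REGNUM names the first FPA register,
   as elsewhere in this file).  */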
9677 /* Return true if the current function needs to save/restore LR. */
9679 static bool
9680 thumb_force_lr_save (void)
9682 return !cfun->machine->lr_save_eliminated
9683 && (!leaf_function_p ()
9684 || thumb_far_jump_used_p ()
9685 || regs_ever_live [LR_REGNUM]);
9689 /* Compute the distance from register FROM to register TO.
9690 These can be the arg pointer (26), the soft frame pointer (25),
9691 the stack pointer (13) or the hard frame pointer (11).
9692 In thumb mode r7 is used as the soft frame pointer, if needed.
9693 Typical stack layout looks like this:
9695 old stack pointer -> | |
9696 ----
9697 | | \
9698 | | saved arguments for
9699 | | vararg functions
9700 | | /
9702 hard FP & arg pointer -> | | \
9703 | | stack
9704 | | frame
9705 | | /
9707 | | \
9708 | | call saved
9709 | | registers
9710 soft frame pointer -> | | /
9712 | | \
9713 | | local
9714 | | variables
9715 | | /
9717 | | \
9718 | | outgoing
9719 | | arguments
9720 current stack pointer -> | | /
9723 For a given function some or all of these stack components
9724 may not be needed, giving rise to the possibility of
9725 eliminating some of the registers.
9727 The values returned by this function must reflect the behavior
9728 of arm_expand_prologue() and arm_compute_save_reg_mask().
9730 The sign of the number returned reflects the direction of stack
9731 growth, so the values are positive for all eliminations except
9732 from the soft frame pointer to the hard frame pointer.
9734 SFP may point just inside the local variables block to ensure correct
9735 alignment. */
9738 /* Calculate stack offsets. These are used to calculate register elimination
9739 offsets and in prologue/epilogue code. */
9741 static arm_stack_offsets *
9742 arm_get_frame_offsets (void)
9744 struct arm_stack_offsets *offsets;
9745 unsigned long func_type;
9746 int leaf;
9747 int saved;
9748 HOST_WIDE_INT frame_size;
9750 offsets = &cfun->machine->stack_offsets;
9752 /* We need to know if we are a leaf function. Unfortunately, it
9753 is possible to be called after start_sequence has been called,
9754 which causes get_insns to return the insns for the sequence,
9755 not the function, which will cause leaf_function_p to return
9756 the incorrect result.
9757 However, we only need
9758 to know about leaf functions once reload has completed, and the
9759 frame size cannot be changed after that time, so we can safely
9760 use the cached value. */
9762 if (reload_completed)
9763 return offsets;
9765 /* Initially this is the size of the local variables. It will be translated
9766 into an offset once we have determined the size of preceding data. */
9767 frame_size = ROUND_UP_WORD (get_frame_size ());
9769 leaf = leaf_function_p ();
9771 /* Space for variadic functions. */
9772 offsets->saved_args = current_function_pretend_args_size;
9774 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9776 if (TARGET_ARM)
9778 unsigned int regno;
9780 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9782 /* We know that SP will be doubleword aligned on entry, and we must
9783 preserve that condition at any subroutine call. We also require the
9784 soft frame pointer to be doubleword aligned. */
9786 if (TARGET_REALLY_IWMMXT)
9788 /* Check for the call-saved iWMMXt registers. */
9789 for (regno = FIRST_IWMMXT_REGNUM;
9790 regno <= LAST_IWMMXT_REGNUM;
9791 regno++)
9792 if (regs_ever_live [regno] && ! call_used_regs [regno])
9793 saved += 8;
9796 func_type = arm_current_func_type ();
9797 if (! IS_VOLATILE (func_type))
9799 /* Space for saved FPA registers. */
9800 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9801 if (regs_ever_live[regno] && ! call_used_regs[regno])
9802 saved += 12;
9804 /* Space for saved VFP registers. */
9805 if (TARGET_HARD_FLOAT && TARGET_VFP)
9806 saved += arm_get_vfp_saved_size ();
9809 else /* TARGET_THUMB */
9811 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
9812 if (TARGET_BACKTRACE)
9813 saved += 16;
9816 /* Saved registers include the stack frame. */
9817 offsets->saved_regs = offsets->saved_args + saved;
9818 offsets->soft_frame = offsets->saved_regs;
9819 /* A leaf function does not need any stack alignment if it has nothing
9820 on the stack. */
9821 if (leaf && frame_size == 0)
9823 offsets->outgoing_args = offsets->soft_frame;
9824 return offsets;
9827 /* Ensure SFP has the correct alignment. */
9828 if (ARM_DOUBLEWORD_ALIGN
9829 && (offsets->soft_frame & 7))
9830 offsets->soft_frame += 4;
9832 offsets->outgoing_args = offsets->soft_frame + frame_size
9833 + current_function_outgoing_args_size;
9835 if (ARM_DOUBLEWORD_ALIGN)
9837 /* Ensure SP remains doubleword aligned. */
9838 if (offsets->outgoing_args & 7)
9839 offsets->outgoing_args += 4;
9840 if (offsets->outgoing_args & 7)
9841 abort ();
9844 return offsets;
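/* A worked example (hypothetical function, ARM mode): no pretend
   args, {r4, lr} saved and 8 bytes of locals with no outgoing
   arguments gives saved_args == 0, saved_regs == soft_frame == 8
   and outgoing_args == 16, all of which already satisfy the
   doubleword-alignment checks above.  */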
9848 /* Calculate the relative offsets for the different stack pointers. Positive
9849 offsets are in the direction of stack growth. */
9851 HOST_WIDE_INT
9852 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9854 arm_stack_offsets *offsets;
9856 offsets = arm_get_frame_offsets ();
9858 /* OK, now we have enough information to compute the distances.
9859 There must be an entry in these switch tables for each pair
9860 of registers in ELIMINABLE_REGS, even if some of the entries
9861 seem to be redundant or useless. */
9862 switch (from)
9864 case ARG_POINTER_REGNUM:
9865 switch (to)
9867 case THUMB_HARD_FRAME_POINTER_REGNUM:
9868 return 0;
9870 case FRAME_POINTER_REGNUM:
9871 /* This is the reverse of the soft frame pointer
9872 to hard frame pointer elimination below. */
9873 return offsets->soft_frame - offsets->saved_args;
9875 case ARM_HARD_FRAME_POINTER_REGNUM:
9876 /* If there is no stack frame then the hard
9877 frame pointer and the arg pointer coincide. */
9878 if (offsets->frame == offsets->saved_regs)
9879 return 0;
9880 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9881 return (frame_pointer_needed
9882 && cfun->static_chain_decl != NULL
9883 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9885 case STACK_POINTER_REGNUM:
9886 /* If nothing has been pushed on the stack at all
9887 then this will return -4. This *is* correct! */
9888 return offsets->outgoing_args - (offsets->saved_args + 4);
9890 default:
9891 abort ();
9893 break;
9895 case FRAME_POINTER_REGNUM:
9896 switch (to)
9898 case THUMB_HARD_FRAME_POINTER_REGNUM:
9899 return 0;
9901 case ARM_HARD_FRAME_POINTER_REGNUM:
9902 /* The hard frame pointer points to the top entry in the
9903 stack frame. The soft frame pointer to the bottom entry
9904 in the stack frame. If there is no stack frame at all,
9905 then they are identical. */
9907 return offsets->frame - offsets->soft_frame;
9909 case STACK_POINTER_REGNUM:
9910 return offsets->outgoing_args - offsets->soft_frame;
9912 default:
9913 abort ();
9915 break;
9917 default:
9918 /* You cannot eliminate from the stack pointer.
9919 In theory you could eliminate from the hard frame
9920 pointer to the stack pointer, but this will never
9921 happen, since if a stack frame is not needed the
9922 hard frame pointer will never be used. */
9923 abort ();
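/* Continuing the worked example given after arm_get_frame_offsets:
   with saved_args == 0, soft_frame == 8 and outgoing_args == 16,
   eliminating ARG_POINTER to STACK_POINTER yields 16 - (0 + 4) == 12,
   and FRAME_POINTER to STACK_POINTER yields 16 - 8 == 8.  */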
9928 /* Generate the prologue instructions for entry into an ARM function. */
9929 void
9930 arm_expand_prologue (void)
9932 int reg;
9933 rtx amount;
9934 rtx insn;
9935 rtx ip_rtx;
9936 unsigned long live_regs_mask;
9937 unsigned long func_type;
9938 int fp_offset = 0;
9939 int saved_pretend_args = 0;
9940 int saved_regs = 0;
9941 unsigned HOST_WIDE_INT args_to_push;
9942 arm_stack_offsets *offsets;
9944 func_type = arm_current_func_type ();
9946 /* Naked functions don't have prologues. */
9947 if (IS_NAKED (func_type))
9948 return;
9950 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
9951 args_to_push = current_function_pretend_args_size;
9953 /* Compute which register we will have to save onto the stack. */
9954 live_regs_mask = arm_compute_save_reg_mask ();
9956 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
9958 if (frame_pointer_needed)
9960 if (IS_INTERRUPT (func_type))
9962 /* Interrupt functions must not corrupt any registers.
9963 Creating a frame pointer however, corrupts the IP
9964 register, so we must push it first. */
9965 insn = emit_multi_reg_push (1 << IP_REGNUM);
9967 /* Do not set RTX_FRAME_RELATED_P on this insn.
9968 The dwarf stack unwinding code only wants to see one
9969 stack decrement per function, and this is not it. If
9970 this instruction is labeled as being part of the frame
9971 creation sequence then dwarf2out_frame_debug_expr will
9972 abort when it encounters the assignment of IP to FP
9973 later on, since the use of SP here establishes SP as
9974 the CFA register and not IP.
9976 Anyway this instruction is not really part of the stack
9977 frame creation although it is part of the prologue. */
9979 else if (IS_NESTED (func_type))
9981 /* The Static chain register is the same as the IP register
9982 used as a scratch register during stack frame creation.
9983 To get around this we need to find somewhere to store IP
9984 whilst the frame is being created. We try the following
9985 places in order:
9987 1. The last argument register.
9988 2. A slot on the stack above the frame. (This only
9989 works if the function is not a varargs function).
9990 3. Register r3, after pushing the argument registers
9991 onto the stack.
9993 Note - we only need to tell the dwarf2 backend about the SP
9994 adjustment in the second variant; the static chain register
9995 doesn't need to be unwound, as it doesn't contain a value
9996 inherited from the caller. */
9998 if (regs_ever_live[3] == 0)
10000 insn = gen_rtx_REG (SImode, 3);
10001 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10002 insn = emit_insn (insn);
10004 else if (args_to_push == 0)
10006 rtx dwarf;
10007 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10008 insn = gen_rtx_MEM (SImode, insn);
10009 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10010 insn = emit_insn (insn);
10012 fp_offset = 4;
10014 /* Just tell the dwarf backend that we adjusted SP. */
10015 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10016 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10017 GEN_INT (-fp_offset)));
10018 RTX_FRAME_RELATED_P (insn) = 1;
10019 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10020 dwarf, REG_NOTES (insn));
10022 else
10024 /* Store the args on the stack. */
10025 if (cfun->machine->uses_anonymous_args)
10026 insn = emit_multi_reg_push
10027 ((0xf0 >> (args_to_push / 4)) & 0xf);
10028 else
10029 insn = emit_insn
10030 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10031 GEN_INT (- args_to_push)));
10033 RTX_FRAME_RELATED_P (insn) = 1;
10035 saved_pretend_args = 1;
10036 fp_offset = args_to_push;
10037 args_to_push = 0;
10039 /* Now reuse r3 to preserve IP. */
10040 insn = gen_rtx_REG (SImode, 3);
10041 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10042 (void) emit_insn (insn);
10046 if (fp_offset)
10048 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10049 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10051 else
10052 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10054 insn = emit_insn (insn);
10055 RTX_FRAME_RELATED_P (insn) = 1;
10058 if (args_to_push)
10060 /* Push the argument registers, or reserve space for them. */
10061 if (cfun->machine->uses_anonymous_args)
10062 insn = emit_multi_reg_push
10063 ((0xf0 >> (args_to_push / 4)) & 0xf);
10064 else
10065 insn = emit_insn
10066 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10067 GEN_INT (- args_to_push)));
10068 RTX_FRAME_RELATED_P (insn) = 1;
10071 /* If this is an interrupt service routine, and the link register
10072 is going to be pushed, and we are not creating a stack frame,
10073 (which would involve an extra push of IP and a pop in the epilogue)
10074 subtracting four from LR now will mean that the function return
10075 can be done with a single instruction. */
10076 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10077 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10078 && ! frame_pointer_needed)
10079 emit_insn (gen_rtx_SET (SImode,
10080 gen_rtx_REG (SImode, LR_REGNUM),
10081 gen_rtx_PLUS (SImode,
10082 gen_rtx_REG (SImode, LR_REGNUM),
10083 GEN_INT (-4))));
10085 if (live_regs_mask)
10087 insn = emit_multi_reg_push (live_regs_mask);
10088 saved_regs += bit_count (live_regs_mask) * 4;
10089 RTX_FRAME_RELATED_P (insn) = 1;
10092 if (TARGET_IWMMXT)
10093 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10094 if (regs_ever_live[reg] && ! call_used_regs [reg])
10096 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10097 insn = gen_rtx_MEM (V2SImode, insn);
10098 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10099 gen_rtx_REG (V2SImode, reg)));
10100 RTX_FRAME_RELATED_P (insn) = 1;
10101 saved_regs += 8;
10104 if (! IS_VOLATILE (func_type))
10106 int start_reg;
10108 /* Save any floating point call-saved registers used by this
10109 function. */
10110 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10112 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10113 if (regs_ever_live[reg] && !call_used_regs[reg])
10115 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10116 insn = gen_rtx_MEM (XFmode, insn);
10117 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10118 gen_rtx_REG (XFmode, reg)));
10119 RTX_FRAME_RELATED_P (insn) = 1;
10120 saved_regs += 12;
10123 else
10125 start_reg = LAST_FPA_REGNUM;
10127 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10129 if (regs_ever_live[reg] && !call_used_regs[reg])
10131 if (start_reg - reg == 3)
10133 insn = emit_sfm (reg, 4);
10134 RTX_FRAME_RELATED_P (insn) = 1;
10135 saved_regs += 48;
10136 start_reg = reg - 1;
10139 else
10141 if (start_reg != reg)
10143 insn = emit_sfm (reg + 1, start_reg - reg);
10144 RTX_FRAME_RELATED_P (insn) = 1;
10145 saved_regs += (start_reg - reg) * 12;
10147 start_reg = reg - 1;
10151 if (start_reg != reg)
10153 insn = emit_sfm (reg + 1, start_reg - reg);
10154 saved_regs += (start_reg - reg) * 12;
10155 RTX_FRAME_RELATED_P (insn) = 1;
10158 if (TARGET_HARD_FLOAT && TARGET_VFP)
10160 start_reg = FIRST_VFP_REGNUM;
10162 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10164 if ((!regs_ever_live[reg] || call_used_regs[reg])
10165 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10167 if (start_reg != reg)
10168 saved_regs += vfp_emit_fstmx (start_reg,
10169 (reg - start_reg) / 2);
10170 start_reg = reg + 2;
10173 if (start_reg != reg)
10174 saved_regs += vfp_emit_fstmx (start_reg,
10175 (reg - start_reg) / 2);
10179 if (frame_pointer_needed)
10181 /* Create the new frame pointer. */
10182 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10183 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10184 RTX_FRAME_RELATED_P (insn) = 1;
10186 if (IS_NESTED (func_type))
10188 /* Recover the static chain register. */
10189 if (regs_ever_live [3] == 0
10190 || saved_pretend_args)
10191 insn = gen_rtx_REG (SImode, 3);
10192 else /* if (current_function_pretend_args_size == 0) */
10194 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10195 GEN_INT (4));
10196 insn = gen_rtx_MEM (SImode, insn);
10199 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10200 /* Add a USE to stop propagate_one_insn() from barfing. */
10201 emit_insn (gen_prologue_use (ip_rtx));
10205 offsets = arm_get_frame_offsets ();
10206 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10208 /* This add can produce multiple insns for a large constant, so we
10209 need to get tricky. */
10210 rtx last = get_last_insn ();
10212 amount = GEN_INT (offsets->saved_args + saved_regs
10213 - offsets->outgoing_args);
10215 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10216 amount));
10219 last = last ? NEXT_INSN (last) : get_insns ();
10220 RTX_FRAME_RELATED_P (last) = 1;
10222 while (last != insn);
10224 /* If the frame pointer is needed, emit a special barrier that
10225 will prevent the scheduler from moving stores to the frame
10226 before the stack adjustment. */
10227 if (frame_pointer_needed)
10228 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10229 hard_frame_pointer_rtx));
10233 if (flag_pic)
10234 arm_load_pic_register ();
10236 /* If we are profiling, make sure no instructions are scheduled before
10237 the call to mcount. Similarly if the user has requested no
10238    scheduling in the prologue.  */
10239 if (current_function_profile || TARGET_NO_SCHED_PRO)
10240 emit_insn (gen_blockage ());
10242 /* If the link register is being kept alive, with the return address in it,
10243 then make sure that it does not get reused by the ce2 pass. */
10244 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10246 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10247 cfun->machine->lr_save_eliminated = 1;
10251 /* If CODE is 'd', then X is a condition operand and the instruction
10252    should only be executed if the condition is true.
10253    If CODE is 'D', then X is a condition operand and the instruction
10254    should only be executed if the condition is false; however, if the mode
10255    of the comparison is CCFPEmode, then always execute the instruction -- we
10256    do this because in these circumstances !GE does not necessarily imply LT;
10257    in these cases the instruction pattern will take care to make sure that
10258    an instruction containing %d will follow, thereby undoing the effects of
10259    doing this instruction unconditionally.
10260    If CODE is 'N' then X is a floating point operand that must be negated
10261    before output.
10262    If CODE is 'B' then output a bitwise inverted value of X (a const int).
10263    If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
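/* Illustrative examples (editorial, not exhaustive): with operand 0 the
   register r0 holding a DImode value, "%M0" in an output template prints
   "{r0-r1}"; with operand 0 the CONST_INT 5, "%B0" prints -6 (the
   bitwise inverse); "%@" prints the assembler comment-start sequence.  */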
10264 void
10265 arm_print_operand (FILE *stream, rtx x, int code)
10267 switch (code)
10269 case '@':
10270 fputs (ASM_COMMENT_START, stream);
10271 return;
10273 case '_':
10274 fputs (user_label_prefix, stream);
10275 return;
10277 case '|':
10278 fputs (REGISTER_PREFIX, stream);
10279 return;
10281 case '?':
10282 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10284 if (TARGET_THUMB || current_insn_predicate != NULL)
10285 abort ();
10287 fputs (arm_condition_codes[arm_current_cc], stream);
10289 else if (current_insn_predicate)
10291 enum arm_cond_code code;
10293 if (TARGET_THUMB)
10294 abort ();
10296 code = get_arm_condition_code (current_insn_predicate);
10297 fputs (arm_condition_codes[code], stream);
10299 return;
10301 case 'N':
10303 REAL_VALUE_TYPE r;
10304 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10305 r = REAL_VALUE_NEGATE (r);
10306 fprintf (stream, "%s", fp_const_from_val (&r));
10308 return;
10310 case 'B':
10311 if (GET_CODE (x) == CONST_INT)
10313 HOST_WIDE_INT val;
10314 val = ARM_SIGN_EXTEND (~INTVAL (x));
10315 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10317 else
10319 putc ('~', stream);
10320 output_addr_const (stream, x);
10322 return;
10324 case 'i':
10325 fprintf (stream, "%s", arithmetic_instr (x, 1));
10326 return;
10328 /* Truncate Cirrus shift counts. */
10329 case 's':
10330 if (GET_CODE (x) == CONST_INT)
10332 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10333 return;
10335 arm_print_operand (stream, x, 0);
10336 return;
10338 case 'I':
10339 fprintf (stream, "%s", arithmetic_instr (x, 0));
10340 return;
10342 case 'S':
10344 HOST_WIDE_INT val;
10345 const char * shift = shift_op (x, &val);
10347 if (shift)
10349 fprintf (stream, ", %s ", shift);
10350 if (val == -1)
10351 arm_print_operand (stream, XEXP (x, 1), 0);
10352 else
10353 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10356 return;
10358 /* An explanation of the 'Q', 'R' and 'H' register operands:
10360 In a pair of registers containing a DI or DF value the 'Q'
10361 operand returns the register number of the register containing
10362 the least significant part of the value. The 'R' operand returns
10363 the register number of the register containing the most
10364 significant part of the value.
10366 The 'H' operand returns the higher of the two register numbers.
10367 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10368 same as the 'Q' operand, since the most significant part of the
10369    value is held in the lower-numbered register.  The reverse is true
10370 on systems where WORDS_BIG_ENDIAN is false.
10372 The purpose of these operands is to distinguish between cases
10373 where the endian-ness of the values is important (for example
10374 when they are added together), and cases where the endian-ness
10375 is irrelevant, but the order of register operations is important.
10376 For example when loading a value from memory into a register
10377 pair, the endian-ness does not matter. Provided that the value
10378 from the lower memory address is put into the lower numbered
10379 register, and the value from the higher address is put into the
10380 higher numbered register, the load will work regardless of whether
10381 the value being loaded is big-wordian or little-wordian. The
10382 order of the two register loads can matter however, if the address
10383 of the memory location is actually held in one of the registers
10384 being overwritten by the load. */
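   /* A concrete example (editorial illustration): on a little-endian
      target, a DImode value in the pair r4/r5 keeps its least
      significant word in r4.  Then "%Q0" prints r4, "%R0" prints r5,
      and "%H0" prints r5, the higher-numbered register, regardless of
      which word it holds.  */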
10385 case 'Q':
10386 if (REGNO (x) > LAST_ARM_REGNUM)
10387 abort ();
10388 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10389 return;
10391 case 'R':
10392 if (REGNO (x) > LAST_ARM_REGNUM)
10393 abort ();
10394 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10395 return;
10397 case 'H':
10398 if (REGNO (x) > LAST_ARM_REGNUM)
10399 abort ();
10400 asm_fprintf (stream, "%r", REGNO (x) + 1);
10401 return;
10403 case 'm':
10404 asm_fprintf (stream, "%r",
10405 GET_CODE (XEXP (x, 0)) == REG
10406 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10407 return;
10409 case 'M':
10410 asm_fprintf (stream, "{%r-%r}",
10411 REGNO (x),
10412 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10413 return;
10415 case 'd':
10416 /* CONST_TRUE_RTX means always -- that's the default. */
10417 if (x == const_true_rtx)
10418 return;
10420 fputs (arm_condition_codes[get_arm_condition_code (x)],
10421 stream);
10422 return;
10424 case 'D':
10425 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10426 want to do that. */
10427 if (x == const_true_rtx)
10428 abort ();
10430 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10431 (get_arm_condition_code (x))],
10432 stream);
10433 return;
10435 /* Cirrus registers can be accessed in a variety of ways:
10436 single floating point (f)
10437 double floating point (d)
10438    32-bit integer (fx)
10439    64-bit integer (dx).  */
10440 case 'W': /* Cirrus register in F mode. */
10441 case 'X': /* Cirrus register in D mode. */
10442 case 'Y': /* Cirrus register in FX mode. */
10443 case 'Z': /* Cirrus register in DX mode. */
10444 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10445 abort ();
10447 fprintf (stream, "mv%s%s",
10448 code == 'W' ? "f"
10449 : code == 'X' ? "d"
10450 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10452 return;
10454 /* Print a Cirrus register in the mode specified by the register's mode.  */
10455 case 'V':
10457 int mode = GET_MODE (x);
10459 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10460 abort ();
10462 fprintf (stream, "mv%s%s",
10463 mode == DFmode ? "d"
10464 : mode == SImode ? "fx"
10465 : mode == DImode ? "dx"
10466 : "f", reg_names[REGNO (x)] + 2);
10468 return;
10471 case 'U':
10472 if (GET_CODE (x) != REG
10473 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10474 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10475 /* Bad value for wCG register number. */
10476 abort ();
10477 else
10478 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10479 return;
10481 /* Print an iWMMXt control register name. */
10482 case 'w':
10483 if (GET_CODE (x) != CONST_INT
10484 || INTVAL (x) < 0
10485 || INTVAL (x) >= 16)
10486 /* Bad value for wC register number. */
10487 abort ();
10488 else
10490 static const char * wc_reg_names [16] =
10492 "wCID", "wCon", "wCSSF", "wCASF",
10493 "wC4", "wC5", "wC6", "wC7",
10494 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10495 "wC12", "wC13", "wC14", "wC15"
10498 fputs (wc_reg_names [INTVAL (x)], stream);
10500 return;
10502 /* Print a VFP double precision register name. */
10503 case 'P':
10505 int mode = GET_MODE (x);
10506 int num;
10508 if (mode != DImode && mode != DFmode)
10509 abort ();
10511 if (GET_CODE (x) != REG
10512 || !IS_VFP_REGNUM (REGNO (x)))
10513 abort ();
10515 num = REGNO (x) - FIRST_VFP_REGNUM;
10516 if (num & 1)
10517 abort ();
10519 fprintf (stream, "d%d", num >> 1);
10521 return;
10523 default:
10524 if (x == 0)
10525 abort ();
10527 if (GET_CODE (x) == REG)
10528 asm_fprintf (stream, "%r", REGNO (x));
10529 else if (GET_CODE (x) == MEM)
10531 output_memory_reference_mode = GET_MODE (x);
10532 output_address (XEXP (x, 0));
10534 else if (GET_CODE (x) == CONST_DOUBLE)
10535 fprintf (stream, "#%s", fp_immediate_constant (x));
10536 else if (GET_CODE (x) == NEG)
10537 abort (); /* This should never happen now. */
10538 else
10540 fputc ('#', stream);
10541 output_addr_const (stream, x);
10546 #ifndef AOF_ASSEMBLER
10547 /* Target hook for assembling integer objects. The ARM version needs to
10548 handle word-sized values specially. */
10549 static bool
10550 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10552 if (size == UNITS_PER_WORD && aligned_p)
10554 fputs ("\t.word\t", asm_out_file);
10555 output_addr_const (asm_out_file, x);
10557 /* Mark symbols as position independent. We only do this in the
10558 .text segment, not in the .data segment. */
10559 if (NEED_GOT_RELOC && flag_pic && making_const_table
10560     && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10562 if (GET_CODE (x) == SYMBOL_REF
10563 && (CONSTANT_POOL_ADDRESS_P (x)
10564 || SYMBOL_REF_LOCAL_P (x)))
10565 fputs ("(GOTOFF)", asm_out_file);
10566 else if (GET_CODE (x) == LABEL_REF)
10567 fputs ("(GOTOFF)", asm_out_file);
10568 else
10569 fputs ("(GOT)", asm_out_file);
10571 fputc ('\n', asm_out_file);
10572 return true;
10575 if (arm_vector_mode_supported_p (GET_MODE (x)))
10577 int i, units;
10579 if (GET_CODE (x) != CONST_VECTOR)
10580 abort ();
10582 units = CONST_VECTOR_NUNITS (x);
10584 switch (GET_MODE (x))
10586 case V2SImode: size = 4; break;
10587 case V4HImode: size = 2; break;
10588 case V8QImode: size = 1; break;
10589 default:
10590 abort ();
10593 for (i = 0; i < units; i++)
10595 rtx elt;
10597 elt = CONST_VECTOR_ELT (x, i);
10598 assemble_integer
10599 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10602 return true;
10605 return default_assemble_integer (x, size, aligned_p);
10607 #endif
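/* For example (a sketch of typical output): when emitting the address
   of a local symbol from a PIC constant pool, the code above might
   produce

	.word	.LC3(GOTOFF)

   while a reference to a global symbol would instead get the (GOT)
   suffix.  The .LC3 label name here is purely illustrative.  */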
10609 /* A finite state machine takes care of noticing whether or not instructions
10610    can be conditionally executed, thus decreasing execution time and code
10611    size by deleting branch instructions.  The fsm is controlled by
10612    final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */
10614 /* The states of the fsm controlling condition codes are:
10615 0: normal, do nothing special
10616 1: make ASM_OUTPUT_OPCODE not output this instruction
10617 2: make ASM_OUTPUT_OPCODE not output this instruction
10618 3: make instructions conditional
10619 4: make instructions conditional
10621 State transitions (state->state by whom under condition):
10622 0 -> 1 final_prescan_insn if the `target' is a label
10623 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10624 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10625 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10626 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10627 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10628 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10629 (the target insn is arm_target_insn).
10631 If the jump clobbers the conditions then we use states 2 and 4.
10633 A similar thing can be done with conditional return insns.
10635 XXX In case the `target' is an unconditional branch, this conditionalising
10636 of the instructions always reduces code size, but not always execution
10637 time. But then, I want to reduce the code size to somewhere near what
10638 /bin/cc produces. */
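/* As an illustration of the transformation (editorial sketch): a
   sequence such as

	cmp	r0, #0
	bne	.L2
	mov	r1, #1
   .L2:

   can have the branch deleted and the skipped instruction made
   conditional, becoming

	cmp	r0, #0
	moveq	r1, #1

   which is smaller and avoids a branch.  */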
10640 /* Returns the index of the ARM condition code string in
10641 `arm_condition_codes'. COMPARISON should be an rtx like
10642 `(eq (...) (...))'. */
10643 static enum arm_cond_code
10644 get_arm_condition_code (rtx comparison)
10646 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10647 int code;
10648 enum rtx_code comp_code = GET_CODE (comparison);
10650 if (GET_MODE_CLASS (mode) != MODE_CC)
10651 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10652 XEXP (comparison, 1));
10654 switch (mode)
10656 case CC_DNEmode: code = ARM_NE; goto dominance;
10657 case CC_DEQmode: code = ARM_EQ; goto dominance;
10658 case CC_DGEmode: code = ARM_GE; goto dominance;
10659 case CC_DGTmode: code = ARM_GT; goto dominance;
10660 case CC_DLEmode: code = ARM_LE; goto dominance;
10661 case CC_DLTmode: code = ARM_LT; goto dominance;
10662 case CC_DGEUmode: code = ARM_CS; goto dominance;
10663 case CC_DGTUmode: code = ARM_HI; goto dominance;
10664 case CC_DLEUmode: code = ARM_LS; goto dominance;
10665 case CC_DLTUmode: code = ARM_CC;
10667 dominance:
10668 if (comp_code != EQ && comp_code != NE)
10669 abort ();
10671 if (comp_code == EQ)
10672 return ARM_INVERSE_CONDITION_CODE (code);
10673 return code;
10675 case CC_NOOVmode:
10676 switch (comp_code)
10678 case NE: return ARM_NE;
10679 case EQ: return ARM_EQ;
10680 case GE: return ARM_PL;
10681 case LT: return ARM_MI;
10682 default: abort ();
10685 case CC_Zmode:
10686 switch (comp_code)
10688 case NE: return ARM_NE;
10689 case EQ: return ARM_EQ;
10690 default: abort ();
10693 case CC_Nmode:
10694 switch (comp_code)
10696 case NE: return ARM_MI;
10697 case EQ: return ARM_PL;
10698 default: abort ();
10701 case CCFPEmode:
10702 case CCFPmode:
10703 /* These encodings assume that AC=1 in the FPA system control
10704 byte. This allows us to handle all cases except UNEQ and
10705 LTGT. */
10706 switch (comp_code)
10708 case GE: return ARM_GE;
10709 case GT: return ARM_GT;
10710 case LE: return ARM_LS;
10711 case LT: return ARM_MI;
10712 case NE: return ARM_NE;
10713 case EQ: return ARM_EQ;
10714 case ORDERED: return ARM_VC;
10715 case UNORDERED: return ARM_VS;
10716 case UNLT: return ARM_LT;
10717 case UNLE: return ARM_LE;
10718 case UNGT: return ARM_HI;
10719 case UNGE: return ARM_PL;
10720 /* UNEQ and LTGT do not have a representation. */
10721 case UNEQ: /* Fall through. */
10722 case LTGT: /* Fall through. */
10723 default: abort ();
10726 case CC_SWPmode:
10727 switch (comp_code)
10729 case NE: return ARM_NE;
10730 case EQ: return ARM_EQ;
10731 case GE: return ARM_LE;
10732 case GT: return ARM_LT;
10733 case LE: return ARM_GE;
10734 case LT: return ARM_GT;
10735 case GEU: return ARM_LS;
10736 case GTU: return ARM_CC;
10737 case LEU: return ARM_CS;
10738 case LTU: return ARM_HI;
10739 default: abort ();
10742 case CC_Cmode:
10743 switch (comp_code)
10745 case LTU: return ARM_CS;
10746 case GEU: return ARM_CC;
10747 default: abort ();
10750 case CCmode:
10751 switch (comp_code)
10753 case NE: return ARM_NE;
10754 case EQ: return ARM_EQ;
10755 case GE: return ARM_GE;
10756 case GT: return ARM_GT;
10757 case LE: return ARM_LE;
10758 case LT: return ARM_LT;
10759 case GEU: return ARM_CS;
10760 case GTU: return ARM_HI;
10761 case LEU: return ARM_LS;
10762 case LTU: return ARM_CC;
10763 default: abort ();
10766 default: abort ();
10769 abort ();
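/* A note on the swapped table above (editorial illustration):
   CC_SWPmode records that the compare instruction was emitted with its
   operands in the reverse order of the rtl comparison.  So for
   (gt a b) compared as "cmp b, a", the condition to test is "lt",
   which is why GT maps to ARM_LT in the CC_SWPmode entries.  */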
10772 void
10773 arm_final_prescan_insn (rtx insn)
10775 /* BODY will hold the body of INSN. */
10776 rtx body = PATTERN (insn);
10778 /* This will be 1 if we are trying to repeat the trick (see below), and
10779    things need to be reversed if the attempt appears to fail.  */
10780 int reverse = 0;
10782 /* JUMP_CLOBBERS being nonzero implies that the condition codes are
10783    clobbered if a branch is taken, even if the rtl suggests otherwise.  It
10784    also means that we have to grub around within the jump expression to find
10785    out what the conditions are when the jump isn't taken.  */
10786 int jump_clobbers = 0;
10788 /* If we start with a return insn, we only succeed if we find another one. */
10789 int seeking_return = 0;
10791 /* START_INSN will hold the insn from where we start looking. This is the
10792 first insn after the following code_label if REVERSE is true. */
10793 rtx start_insn = insn;
10795 /* If in state 4, check if the target branch is reached, in order to
10796 change back to state 0. */
10797 if (arm_ccfsm_state == 4)
10799 if (insn == arm_target_insn)
10801 arm_target_insn = NULL;
10802 arm_ccfsm_state = 0;
10804 return;
10807 /* If in state 3, it is possible to repeat the trick, if this insn is an
10808 unconditional branch to a label, and immediately following this branch
10809 is the previous target label which is only used once, and the label this
10810 branch jumps to is not too far off. */
10811 if (arm_ccfsm_state == 3)
10813 if (simplejump_p (insn))
10815 start_insn = next_nonnote_insn (start_insn);
10816 if (GET_CODE (start_insn) == BARRIER)
10818 /* XXX Isn't this always a barrier? */
10819 start_insn = next_nonnote_insn (start_insn);
10821 if (GET_CODE (start_insn) == CODE_LABEL
10822 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10823 && LABEL_NUSES (start_insn) == 1)
10824 reverse = TRUE;
10825 else
10826 return;
10828 else if (GET_CODE (body) == RETURN)
10830 start_insn = next_nonnote_insn (start_insn);
10831 if (GET_CODE (start_insn) == BARRIER)
10832 start_insn = next_nonnote_insn (start_insn);
10833 if (GET_CODE (start_insn) == CODE_LABEL
10834 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10835 && LABEL_NUSES (start_insn) == 1)
10837 reverse = TRUE;
10838 seeking_return = 1;
10840 else
10841 return;
10843 else
10844 return;
10847 if (arm_ccfsm_state != 0 && !reverse)
10848 abort ();
10849 if (GET_CODE (insn) != JUMP_INSN)
10850 return;
10852 /* This jump might be paralleled with a clobber of the condition codes;
10853    the jump should always come first.  */
10854 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10855 body = XVECEXP (body, 0, 0);
10857 if (reverse
10858 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10859 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10861 int insns_skipped;
10862 int fail = FALSE, succeed = FALSE;
10863 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10864 int then_not_else = TRUE;
10865 rtx this_insn = start_insn, label = 0;
10867 /* If the jump cannot be done with one instruction, we cannot
10868 conditionally execute the instruction in the inverse case. */
10869 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10871 jump_clobbers = 1;
10872 return;
10875 /* Register the insn jumped to. */
10876 if (reverse)
10878 if (!seeking_return)
10879 label = XEXP (SET_SRC (body), 0);
10881 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10882 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10883 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10885 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10886 then_not_else = FALSE;
10888 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10889 seeking_return = 1;
10890 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10892 seeking_return = 1;
10893 then_not_else = FALSE;
10895 else
10896 abort ();
10898 /* See how many insns this branch skips, and what kind of insns. If all
10899 insns are okay, and the label or unconditional branch to the same
10900 label is not too far away, succeed. */
10901 for (insns_skipped = 0;
10902 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10904 rtx scanbody;
10906 this_insn = next_nonnote_insn (this_insn);
10907 if (!this_insn)
10908 break;
10910 switch (GET_CODE (this_insn))
10912 case CODE_LABEL:
10913 /* Succeed if it is the target label, otherwise fail since
10914 control falls in from somewhere else. */
10915 if (this_insn == label)
10917 if (jump_clobbers)
10919 arm_ccfsm_state = 2;
10920 this_insn = next_nonnote_insn (this_insn);
10922 else
10923 arm_ccfsm_state = 1;
10924 succeed = TRUE;
10926 else
10927 fail = TRUE;
10928 break;
10930 case BARRIER:
10931 /* Succeed if the following insn is the target label.
10932 Otherwise fail.
10933 If return insns are used then the last insn in a function
10934 will be a barrier. */
10935 this_insn = next_nonnote_insn (this_insn);
10936 if (this_insn && this_insn == label)
10938 if (jump_clobbers)
10940 arm_ccfsm_state = 2;
10941 this_insn = next_nonnote_insn (this_insn);
10943 else
10944 arm_ccfsm_state = 1;
10945 succeed = TRUE;
10947 else
10948 fail = TRUE;
10949 break;
10951 case CALL_INSN:
10952 /* The AAPCS says that conditional calls should not be
10953 used since they make interworking inefficient (the
10954 linker can't transform BL<cond> into BLX). That's
10955 only a problem if the machine has BLX. */
10956 if (arm_arch5)
10958 fail = TRUE;
10959 break;
10962 /* Succeed if the following insn is the target label, or
10963 if the following two insns are a barrier and the
10964 target label. */
10965 this_insn = next_nonnote_insn (this_insn);
10966 if (this_insn && GET_CODE (this_insn) == BARRIER)
10967 this_insn = next_nonnote_insn (this_insn);
10969 if (this_insn && this_insn == label
10970 && insns_skipped < max_insns_skipped)
10972 if (jump_clobbers)
10974 arm_ccfsm_state = 2;
10975 this_insn = next_nonnote_insn (this_insn);
10977 else
10978 arm_ccfsm_state = 1;
10979 succeed = TRUE;
10981 else
10982 fail = TRUE;
10983 break;
10985 case JUMP_INSN:
10986 /* If this is an unconditional branch to the same label, succeed.
10987 If it is to another label, do nothing. If it is conditional,
10988 fail. */
10989 /* XXX Probably, the tests for SET and the PC are
10990 unnecessary. */
10992 scanbody = PATTERN (this_insn);
10993 if (GET_CODE (scanbody) == SET
10994 && GET_CODE (SET_DEST (scanbody)) == PC)
10996 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
10997 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
10999 arm_ccfsm_state = 2;
11000 succeed = TRUE;
11002 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11003 fail = TRUE;
11005 /* Fail if a conditional return is undesirable (e.g. on a
11006 StrongARM), but still allow this if optimizing for size. */
11007 else if (GET_CODE (scanbody) == RETURN
11008 && !use_return_insn (TRUE, NULL)
11009 && !optimize_size)
11010 fail = TRUE;
11011 else if (GET_CODE (scanbody) == RETURN
11012 && seeking_return)
11014 arm_ccfsm_state = 2;
11015 succeed = TRUE;
11017 else if (GET_CODE (scanbody) == PARALLEL)
11019 switch (get_attr_conds (this_insn))
11021 case CONDS_NOCOND:
11022 break;
11023 default:
11024 fail = TRUE;
11025 break;
11028 else
11029 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11031 break;
11033 case INSN:
11034 /* Instructions using or affecting the condition codes make it
11035 fail. */
11036 scanbody = PATTERN (this_insn);
11037 if (!(GET_CODE (scanbody) == SET
11038 || GET_CODE (scanbody) == PARALLEL)
11039 || get_attr_conds (this_insn) != CONDS_NOCOND)
11040 fail = TRUE;
11042 /* A conditional Cirrus instruction must be followed by
11043    a non-Cirrus instruction.  However, since we
11044    conditionalize instructions in this function, and since
11045    by the time we get here we cannot add instructions
11046    (nops) because shorten_branches () has already been
11047    called, we simply disable conditionalizing Cirrus
11048    instructions, to be safe.  */
11049 if (GET_CODE (scanbody) != USE
11050 && GET_CODE (scanbody) != CLOBBER
11051 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11052 fail = TRUE;
11053 break;
11055 default:
11056 break;
11059 if (succeed)
11061 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11062 arm_target_label = CODE_LABEL_NUMBER (label);
11063 else if (seeking_return || arm_ccfsm_state == 2)
11065 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11067 this_insn = next_nonnote_insn (this_insn);
11068 if (this_insn && (GET_CODE (this_insn) == BARRIER
11069 || GET_CODE (this_insn) == CODE_LABEL))
11070 abort ();
11072 if (!this_insn)
11074 /* Oh dear!  We ran off the end.  Give up.  */
11075 recog (PATTERN (insn), insn, NULL);
11076 arm_ccfsm_state = 0;
11077 arm_target_insn = NULL;
11078 return;
11080 arm_target_insn = this_insn;
11082 else
11083 abort ();
11084 if (jump_clobbers)
11086 if (reverse)
11087 abort ();
11088 arm_current_cc =
11089 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11090 0), 0), 1));
11091 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11092 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11093 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11094 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11096 else
11098 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11099 what it was. */
11100 if (!reverse)
11101 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11102 0));
11105 if (reverse || then_not_else)
11106 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11109 /* Restore recog_data (getting the attributes of other insns can
11110 destroy this array, but final.c assumes that it remains intact
11111 across this call; since the insn has been recognized already we
11112    call recog directly).  */
11113 recog (PATTERN (insn), insn, NULL);
11117 /* Returns true if REGNO is a valid register
11118    for holding a quantity of mode MODE.  */
11120 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11122 if (GET_MODE_CLASS (mode) == MODE_CC)
11123 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11125 if (TARGET_THUMB)
11126 /* For the Thumb we only allow values bigger than SImode in
11127 registers 0 - 6, so that there is always a second low
11128 register available to hold the upper part of the value.
11129    We probably ought to ensure that the register is the
11130    start of an even-numbered register pair.  */
11131 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11133 if (IS_CIRRUS_REGNUM (regno))
11134 /* We have outlawed SI values in Cirrus registers because they
11135 reside in the lower 32 bits, but SF values reside in the
11136 upper 32 bits. This causes gcc all sorts of grief. We can't
11137 even split the registers into pairs because Cirrus SI values
11138    get sign extended to 64 bits -- aldyh.  */
11139 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11141 if (IS_VFP_REGNUM (regno))
11143 if (mode == SFmode || mode == SImode)
11144 return TRUE;
11146 /* DFmode values are only valid in even register pairs. */
11147 if (mode == DFmode)
11148 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11149 return FALSE;
11152 if (IS_IWMMXT_GR_REGNUM (regno))
11153 return mode == SImode;
11155 if (IS_IWMMXT_REGNUM (regno))
11156 return VALID_IWMMXT_REG_MODE (mode);
11158 /* We allow any value to be stored in the general registers.
11159 Restrict doubleword quantities to even register pairs so that we can
11160 use ldrd. */
11161 if (regno <= LAST_ARM_REGNUM)
11162 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11164 if ( regno == FRAME_POINTER_REGNUM
11165 || regno == ARG_POINTER_REGNUM)
11166 /* We only allow integers in the fake hard registers. */
11167 return GET_MODE_CLASS (mode) == MODE_INT;
11169 /* The only registers left are the FPA registers
11170 which we only allow to hold FP values. */
11171 return GET_MODE_CLASS (mode) == MODE_FLOAT
11172 && regno >= FIRST_FPA_REGNUM
11173 && regno <= LAST_FPA_REGNUM;
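/* A couple of concrete cases (editorial illustration): SImode is
   accepted in any core register, while with TARGET_LDRD a DImode value
   is rejected in r1 (an odd-numbered register), so that doubleword
   values always sit in even/odd pairs that ldrd/strd can address.  */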
11177 arm_regno_class (int regno)
11179 if (TARGET_THUMB)
11181 if (regno == STACK_POINTER_REGNUM)
11182 return STACK_REG;
11183 if (regno == CC_REGNUM)
11184 return CC_REG;
11185 if (regno < 8)
11186 return LO_REGS;
11187 return HI_REGS;
11190 if ( regno <= LAST_ARM_REGNUM
11191 || regno == FRAME_POINTER_REGNUM
11192 || regno == ARG_POINTER_REGNUM)
11193 return GENERAL_REGS;
11195 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11196 return NO_REGS;
11198 if (IS_CIRRUS_REGNUM (regno))
11199 return CIRRUS_REGS;
11201 if (IS_VFP_REGNUM (regno))
11202 return VFP_REGS;
11204 if (IS_IWMMXT_REGNUM (regno))
11205 return IWMMXT_REGS;
11207 if (IS_IWMMXT_GR_REGNUM (regno))
11208 return IWMMXT_GR_REGS;
11210 return FPA_REGS;
11213 /* Handle a special case when computing the offset
11214 of an argument from the frame pointer. */
11216 arm_debugger_arg_offset (int value, rtx addr)
11218 rtx insn;
11220 /* We are only interested if dbxout_parms() failed to compute the offset. */
11221 if (value != 0)
11222 return 0;
11224 /* We can only cope with the case where the address is held in a register. */
11225 if (GET_CODE (addr) != REG)
11226 return 0;
11228 /* If we are using the frame pointer to point at the argument, then
11229 an offset of 0 is correct. */
11230 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11231 return 0;
11233 /* If we are using the stack pointer to point at the
11234 argument, then an offset of 0 is correct. */
11235 if ((TARGET_THUMB || !frame_pointer_needed)
11236 && REGNO (addr) == SP_REGNUM)
11237 return 0;
11239 /* Oh dear. The argument is pointed to by a register rather
11240 than being held in a register, or being stored at a known
11241 offset from the frame pointer. Since GDB only understands
11242 those two kinds of argument we must translate the address
11243 held in the register into an offset from the frame pointer.
11244 We do this by searching through the insns for the function
11245 looking to see where this register gets its value. If the
11246 register is initialized from the frame pointer plus an offset
11247 then we are in luck and we can continue, otherwise we give up.
11249 This code is exercised by producing debugging information
11250 for a function with arguments like this:
11252 double func (double a, double b, int c, double d) {return d;}
11254 Without this code the stab for parameter 'd' will be set to
11255 an offset of 0 from the frame pointer, rather than 8. */
11257 /* The if() statement says:
11259 If the insn is a normal instruction
11260 and if the insn is setting the value in a register
11261 and if the register being set is the register holding the address of the argument
11262    and if the address is computed by an addition
11263 that involves adding to a register
11264 which is the frame pointer
11265 a constant integer
11267 then... */
11269 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11271 if ( GET_CODE (insn) == INSN
11272 && GET_CODE (PATTERN (insn)) == SET
11273 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11274 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11275 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11276 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11277 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11280 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11282 break;
11286 if (value == 0)
11288 debug_rtx (addr);
11289 warning ("unable to compute real location of stacked parameter");
11290 value = 8; /* XXX magic hack */
11293 return value;
11296 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11297 do \
11299 if ((MASK) & insn_flags) \
11300 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11301 BUILT_IN_MD, NULL, NULL_TREE); \
11303 while (0)
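/* Note that def_mbuiltin registers a builtin only when the MASK bit is
   present in insn_flags, i.e. when the selected CPU actually has the
   feature; compiling for a core without iWMMXt therefore leaves the
   __builtin_arm_* names undefined.  */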
11305 struct builtin_description
11307 const unsigned int mask;
11308 const enum insn_code icode;
11309 const char * const name;
11310 const enum arm_builtins code;
11311 const enum rtx_code comparison;
11312 const unsigned int flag;
11315 static const struct builtin_description bdesc_2arg[] =
11317 #define IWMMXT_BUILTIN(code, string, builtin) \
11318 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11319 ARM_BUILTIN_##builtin, 0, 0 },
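/* For example, the first entry below,
   IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB), expands to

     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },

   tying the user-visible builtin name to the insn pattern that
   implements it.  */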
11321 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11322 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11323 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11324 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11325 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11326 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11327 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11328 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11329 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11330 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11331 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11332 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11333 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11334 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11335 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11336 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11337 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11338 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11339 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11340 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11341 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11342 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11343 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11344 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11345 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11346 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11347 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11348 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11349 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11350 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11351 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11352 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11353 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11354 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11355 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11356 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11357 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11358 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11359 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11360 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11361 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11362 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11363 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11364 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11365 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11366 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11367 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11368 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11369 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11370 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11371 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11372 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11373 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11374 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11375 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11376 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11377 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11378 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11380 #define IWMMXT_BUILTIN2(code, builtin) \
11381 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
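/* Entries created with IWMMXT_BUILTIN2 have a NULL name, so the generic
   registration loop in arm_init_iwmmxt_builtins skips them (it tests
   d->name == 0); they are instead registered explicitly below with
   def_mbuiltin, which supplies the more complicated function types,
   while the table entry still carries the insn code used at expansion
   time.  */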
11383 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11384 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11385 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11386 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11387 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11388 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11389 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11390 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11391 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11392 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11393 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11394 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11395 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11396 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11397 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11398 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11399 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11400 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11401 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11402 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11403 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11404 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11405 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11406 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11407 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11408 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11409 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11410 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11411 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11412 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11413 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11414 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11417 static const struct builtin_description bdesc_1arg[] =
11419 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11420 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11421 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11422 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11423 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11424 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11425 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11426 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11427 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11428 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11429 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11430 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11431 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11432 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11433 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11434 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11435 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11436 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11439 /* Set up all the iWMMXt builtins. This is
11440 not called if TARGET_IWMMXT is zero. */
11442 static void
11443 arm_init_iwmmxt_builtins (void)
11445 const struct builtin_description * d;
11446 size_t i;
11447 tree endlink = void_list_node;
11449 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11450 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11451 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11453 tree int_ftype_int
11454 = build_function_type (integer_type_node,
11455 tree_cons (NULL_TREE, integer_type_node, endlink));
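  /* Each of these tree_cons chains, terminated by void_list_node
     (endlink), describes a fixed C prototype; int_ftype_int above, for
     instance, is the type an ordinary declaration `int f (int);' would
     have.  The same pattern repeats for the vector and long long
     variants that follow.  */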
11456 tree v8qi_ftype_v8qi_v8qi_int
11457 = build_function_type (V8QI_type_node,
11458 tree_cons (NULL_TREE, V8QI_type_node,
11459 tree_cons (NULL_TREE, V8QI_type_node,
11460 tree_cons (NULL_TREE,
11461 integer_type_node,
11462 endlink))));
11463 tree v4hi_ftype_v4hi_int
11464 = build_function_type (V4HI_type_node,
11465 tree_cons (NULL_TREE, V4HI_type_node,
11466 tree_cons (NULL_TREE, integer_type_node,
11467 endlink)));
11468 tree v2si_ftype_v2si_int
11469 = build_function_type (V2SI_type_node,
11470 tree_cons (NULL_TREE, V2SI_type_node,
11471 tree_cons (NULL_TREE, integer_type_node,
11472 endlink)));
11473 tree v2si_ftype_di_di
11474 = build_function_type (V2SI_type_node,
11475 tree_cons (NULL_TREE, long_long_integer_type_node,
11476 tree_cons (NULL_TREE, long_long_integer_type_node,
11477 endlink)));
11478 tree di_ftype_di_int
11479 = build_function_type (long_long_integer_type_node,
11480 tree_cons (NULL_TREE, long_long_integer_type_node,
11481 tree_cons (NULL_TREE, integer_type_node,
11482 endlink)));
11483 tree di_ftype_di_int_int
11484 = build_function_type (long_long_integer_type_node,
11485 tree_cons (NULL_TREE, long_long_integer_type_node,
11486 tree_cons (NULL_TREE, integer_type_node,
11487 tree_cons (NULL_TREE,
11488 integer_type_node,
11489 endlink))));
11490 tree int_ftype_v8qi
11491 = build_function_type (integer_type_node,
11492 tree_cons (NULL_TREE, V8QI_type_node,
11493 endlink));
11494 tree int_ftype_v4hi
11495 = build_function_type (integer_type_node,
11496 tree_cons (NULL_TREE, V4HI_type_node,
11497 endlink));
11498 tree int_ftype_v2si
11499 = build_function_type (integer_type_node,
11500 tree_cons (NULL_TREE, V2SI_type_node,
11501 endlink));
11502 tree int_ftype_v8qi_int
11503 = build_function_type (integer_type_node,
11504 tree_cons (NULL_TREE, V8QI_type_node,
11505 tree_cons (NULL_TREE, integer_type_node,
11506 endlink)));
11507 tree int_ftype_v4hi_int
11508 = build_function_type (integer_type_node,
11509 tree_cons (NULL_TREE, V4HI_type_node,
11510 tree_cons (NULL_TREE, integer_type_node,
11511 endlink)));
11512 tree int_ftype_v2si_int
11513 = build_function_type (integer_type_node,
11514 tree_cons (NULL_TREE, V2SI_type_node,
11515 tree_cons (NULL_TREE, integer_type_node,
11516 endlink)));
11517 tree v8qi_ftype_v8qi_int_int
11518 = build_function_type (V8QI_type_node,
11519 tree_cons (NULL_TREE, V8QI_type_node,
11520 tree_cons (NULL_TREE, integer_type_node,
11521 tree_cons (NULL_TREE,
11522 integer_type_node,
11523 endlink))));
11524 tree v4hi_ftype_v4hi_int_int
11525 = build_function_type (V4HI_type_node,
11526 tree_cons (NULL_TREE, V4HI_type_node,
11527 tree_cons (NULL_TREE, integer_type_node,
11528 tree_cons (NULL_TREE,
11529 integer_type_node,
11530 endlink))));
11531 tree v2si_ftype_v2si_int_int
11532 = build_function_type (V2SI_type_node,
11533 tree_cons (NULL_TREE, V2SI_type_node,
11534 tree_cons (NULL_TREE, integer_type_node,
11535 tree_cons (NULL_TREE,
11536 integer_type_node,
11537 endlink))));
11538 /* Miscellaneous. */
11539 tree v8qi_ftype_v4hi_v4hi
11540 = build_function_type (V8QI_type_node,
11541 tree_cons (NULL_TREE, V4HI_type_node,
11542 tree_cons (NULL_TREE, V4HI_type_node,
11543 endlink)));
11544 tree v4hi_ftype_v2si_v2si
11545 = build_function_type (V4HI_type_node,
11546 tree_cons (NULL_TREE, V2SI_type_node,
11547 tree_cons (NULL_TREE, V2SI_type_node,
11548 endlink)));
11549 tree v2si_ftype_v4hi_v4hi
11550 = build_function_type (V2SI_type_node,
11551 tree_cons (NULL_TREE, V4HI_type_node,
11552 tree_cons (NULL_TREE, V4HI_type_node,
11553 endlink)));
11554 tree v2si_ftype_v8qi_v8qi
11555 = build_function_type (V2SI_type_node,
11556 tree_cons (NULL_TREE, V8QI_type_node,
11557 tree_cons (NULL_TREE, V8QI_type_node,
11558 endlink)));
11559 tree v4hi_ftype_v4hi_di
11560 = build_function_type (V4HI_type_node,
11561 tree_cons (NULL_TREE, V4HI_type_node,
11562 tree_cons (NULL_TREE,
11563 long_long_integer_type_node,
11564 endlink)));
11565 tree v2si_ftype_v2si_di
11566 = build_function_type (V2SI_type_node,
11567 tree_cons (NULL_TREE, V2SI_type_node,
11568 tree_cons (NULL_TREE,
11569 long_long_integer_type_node,
11570 endlink)));
11571 tree void_ftype_int_int
11572 = build_function_type (void_type_node,
11573 tree_cons (NULL_TREE, integer_type_node,
11574 tree_cons (NULL_TREE, integer_type_node,
11575 endlink)));
11576 tree di_ftype_void
11577 = build_function_type (long_long_unsigned_type_node, endlink);
11578 tree di_ftype_v8qi
11579 = build_function_type (long_long_integer_type_node,
11580 tree_cons (NULL_TREE, V8QI_type_node,
11581 endlink));
11582 tree di_ftype_v4hi
11583 = build_function_type (long_long_integer_type_node,
11584 tree_cons (NULL_TREE, V4HI_type_node,
11585 endlink));
11586 tree di_ftype_v2si
11587 = build_function_type (long_long_integer_type_node,
11588 tree_cons (NULL_TREE, V2SI_type_node,
11589 endlink));
11590 tree v2si_ftype_v4hi
11591 = build_function_type (V2SI_type_node,
11592 tree_cons (NULL_TREE, V4HI_type_node,
11593 endlink));
11594 tree v4hi_ftype_v8qi
11595 = build_function_type (V4HI_type_node,
11596 tree_cons (NULL_TREE, V8QI_type_node,
11597 endlink));
11599 tree di_ftype_di_v4hi_v4hi
11600 = build_function_type (long_long_unsigned_type_node,
11601 tree_cons (NULL_TREE,
11602 long_long_unsigned_type_node,
11603 tree_cons (NULL_TREE, V4HI_type_node,
11604 tree_cons (NULL_TREE,
11605 V4HI_type_node,
11606 endlink))));
11608 tree di_ftype_v4hi_v4hi
11609 = build_function_type (long_long_unsigned_type_node,
11610 tree_cons (NULL_TREE, V4HI_type_node,
11611 tree_cons (NULL_TREE, V4HI_type_node,
11612 endlink)));
11614 /* Normal vector binops. */
11615 tree v8qi_ftype_v8qi_v8qi
11616 = build_function_type (V8QI_type_node,
11617 tree_cons (NULL_TREE, V8QI_type_node,
11618 tree_cons (NULL_TREE, V8QI_type_node,
11619 endlink)));
11620 tree v4hi_ftype_v4hi_v4hi
11621 = build_function_type (V4HI_type_node,
11622 tree_cons (NULL_TREE, V4HI_type_node,
11623 tree_cons (NULL_TREE, V4HI_type_node,
11624 endlink)));
11625 tree v2si_ftype_v2si_v2si
11626 = build_function_type (V2SI_type_node,
11627 tree_cons (NULL_TREE, V2SI_type_node,
11628 tree_cons (NULL_TREE, V2SI_type_node,
11629 endlink)));
11630 tree di_ftype_di_di
11631 = build_function_type (long_long_unsigned_type_node,
11632 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11633 tree_cons (NULL_TREE,
11634 long_long_unsigned_type_node,
11635 endlink)));
11637 /* Add all builtins that are more or less simple operations on two
11638 operands. */
11639 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11641 /* Use one of the operands; the target can have a different mode for
11642 mask-generating compares. */
11643 enum machine_mode mode;
11644 tree type;
11646 if (d->name == 0)
11647 continue;
11649 mode = insn_data[d->icode].operand[1].mode;
11651 switch (mode)
11653 case V8QImode:
11654 type = v8qi_ftype_v8qi_v8qi;
11655 break;
11656 case V4HImode:
11657 type = v4hi_ftype_v4hi_v4hi;
11658 break;
11659 case V2SImode:
11660 type = v2si_ftype_v2si_v2si;
11661 break;
11662 case DImode:
11663 type = di_ftype_di_di;
11664 break;
11666 default:
11667 abort ();
11670 def_mbuiltin (d->mask, d->name, type, d->code);
11673 /* Add the remaining MMX insns with somewhat more complicated types. */
11674 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11675 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11676 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11678 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11679 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11680 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11681 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11682 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11683 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11685 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11686 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11687 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11688 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11689 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11690 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11692 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11693 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11694 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11695 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11696 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11697 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11699 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11700 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11701 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11702 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11703 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11704 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11706 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11708 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11709 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11710 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11711 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11713 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11714 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11715 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11716 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11717 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11718 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11719 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11720 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11721 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11723 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11724 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11725 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11727 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11728 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11729 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11731 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11732 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11733 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11734 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11735 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11736 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11738 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11739 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11740 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11741 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11742 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11743 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11744 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11745 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11746 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11747 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11748 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11749 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11751 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11752 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11753 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11754 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11756 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11757 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11758 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11759 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11760 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11761 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11762 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
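/* A sketch of how these builtins appear to the user (illustrative only;
   it assumes a compiler configured for iWMMXt and the usual GCC
   vector_size attribute for the 64-bit vector types):

	typedef char v8qi __attribute__ ((vector_size (8)));

	v8qi add_bytes (v8qi a, v8qi b)
	{
	  return __builtin_arm_waddb (a, b);
	}

   The call expands through the bdesc_2arg table to the addv8qi3 insn
   pattern.  */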
11765 static void
11766 arm_init_builtins (void)
11768 if (TARGET_REALLY_IWMMXT)
11769 arm_init_iwmmxt_builtins ();
11772 /* Errors in the source file can cause expand_expr to return const0_rtx
11773 where we expect a vector. To avoid crashing, use one of the vector
11774 clear instructions. */
11776 static rtx
11777 safe_vector_operand (rtx x, enum machine_mode mode)
11779 if (x != const0_rtx)
11780 return x;
11781 x = gen_reg_rtx (mode);
11783 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11784 : gen_rtx_SUBREG (DImode, x, 0)));
11785 return x;
11788 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11790 static rtx
11791 arm_expand_binop_builtin (enum insn_code icode,
11792 tree arglist, rtx target)
11794 rtx pat;
11795 tree arg0 = TREE_VALUE (arglist);
11796 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11797 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11798 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11799 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11800 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11801 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11803 if (VECTOR_MODE_P (mode0))
11804 op0 = safe_vector_operand (op0, mode0);
11805 if (VECTOR_MODE_P (mode1))
11806 op1 = safe_vector_operand (op1, mode1);
11808 if (! target
11809 || GET_MODE (target) != tmode
11810 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11811 target = gen_reg_rtx (tmode);
11813 /* In case the insn wants input operands in modes different from
11814 the result, abort. */
11815 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11816 abort ();
11818 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11819 op0 = copy_to_mode_reg (mode0, op0);
11820 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11821 op1 = copy_to_mode_reg (mode1, op1);
11823 pat = GEN_FCN (icode) (target, op0, op1);
11824 if (! pat)
11825 return 0;
11826 emit_insn (pat);
11827 return target;
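/* To trace the flow (editorial note): a call such as
   __builtin_arm_waddb (a, b) is dispatched here from arm_expand_builtin
   through the bdesc_2arg table, arriving with icode == CODE_FOR_addv8qi3;
   the operands are forced into V8QImode registers when the predicates
   demand it, and GEN_FCN emits the actual vector-add insn.  */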
11830 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11832 static rtx
11833 arm_expand_unop_builtin (enum insn_code icode,
11834 tree arglist, rtx target, int do_load)
11836 rtx pat;
11837 tree arg0 = TREE_VALUE (arglist);
11838 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11839 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11840 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11842 if (! target
11843 || GET_MODE (target) != tmode
11844 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11845 target = gen_reg_rtx (tmode);
11846 if (do_load)
11847 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11848 else
11850 if (VECTOR_MODE_P (mode0))
11851 op0 = safe_vector_operand (op0, mode0);
11853 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11854 op0 = copy_to_mode_reg (mode0, op0);
11857 pat = GEN_FCN (icode) (target, op0);
11858 if (! pat)
11859 return 0;
11860 emit_insn (pat);
11861 return target;
11864 /* Expand an expression EXP that calls a built-in function,
11865 with result going to TARGET if that's convenient
11866 (and in mode MODE if that's convenient).
11867 SUBTARGET may be used as the target for computing one of EXP's operands.
11868 IGNORE is nonzero if the value is to be ignored. */
11870 static rtx
11871 arm_expand_builtin (tree exp,
11872 rtx target,
11873 rtx subtarget ATTRIBUTE_UNUSED,
11874 enum machine_mode mode ATTRIBUTE_UNUSED,
11875 int ignore ATTRIBUTE_UNUSED)
11877 const struct builtin_description * d;
11878 enum insn_code icode;
11879 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11880 tree arglist = TREE_OPERAND (exp, 1);
11881 tree arg0;
11882 tree arg1;
11883 tree arg2;
11884 rtx op0;
11885 rtx op1;
11886 rtx op2;
11887 rtx pat;
11888 int fcode = DECL_FUNCTION_CODE (fndecl);
11889 size_t i;
11890 enum machine_mode tmode;
11891 enum machine_mode mode0;
11892 enum machine_mode mode1;
11893 enum machine_mode mode2;
11895 switch (fcode)
11897 case ARM_BUILTIN_TEXTRMSB:
11898 case ARM_BUILTIN_TEXTRMUB:
11899 case ARM_BUILTIN_TEXTRMSH:
11900 case ARM_BUILTIN_TEXTRMUH:
11901 case ARM_BUILTIN_TEXTRMSW:
11902 case ARM_BUILTIN_TEXTRMUW:
11903 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11904 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11905 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11906 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11907 : CODE_FOR_iwmmxt_textrmw);
11909 arg0 = TREE_VALUE (arglist);
11910 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11911 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11912 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11913 tmode = insn_data[icode].operand[0].mode;
11914 mode0 = insn_data[icode].operand[1].mode;
11915 mode1 = insn_data[icode].operand[2].mode;
11917 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11918 op0 = copy_to_mode_reg (mode0, op0);
11919 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11921 /* @@@ better error message */
11922 error ("selector must be an immediate");
11923 return gen_reg_rtx (tmode);
11925 if (target == 0
11926 || GET_MODE (target) != tmode
11927 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11928 target = gen_reg_rtx (tmode);
11929 pat = GEN_FCN (icode) (target, op0, op1);
11930 if (! pat)
11931 return 0;
11932 emit_insn (pat);
11933 return target;
11935 case ARM_BUILTIN_TINSRB:
11936 case ARM_BUILTIN_TINSRH:
11937 case ARM_BUILTIN_TINSRW:
11938 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
11939 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
11940 : CODE_FOR_iwmmxt_tinsrw);
11941 arg0 = TREE_VALUE (arglist);
11942 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11943 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11944 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11945 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11946 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11947 tmode = insn_data[icode].operand[0].mode;
11948 mode0 = insn_data[icode].operand[1].mode;
11949 mode1 = insn_data[icode].operand[2].mode;
11950 mode2 = insn_data[icode].operand[3].mode;
11952 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11953 op0 = copy_to_mode_reg (mode0, op0);
11954 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11955 op1 = copy_to_mode_reg (mode1, op1);
11956 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11958 /* @@@ better error message */
11959 error ("selector must be an immediate");
11960 return const0_rtx;
11962 if (target == 0
11963 || GET_MODE (target) != tmode
11964 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11965 target = gen_reg_rtx (tmode);
11966 pat = GEN_FCN (icode) (target, op0, op1, op2);
11967 if (! pat)
11968 return 0;
11969 emit_insn (pat);
11970 return target;
11972 case ARM_BUILTIN_SETWCX:
11973 arg0 = TREE_VALUE (arglist);
11974 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11975 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
11976 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11977 emit_insn (gen_iwmmxt_tmcr (op1, op0));
11978 return 0;
11980 case ARM_BUILTIN_GETWCX:
11981 arg0 = TREE_VALUE (arglist);
11982 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11983 target = gen_reg_rtx (SImode);
11984 emit_insn (gen_iwmmxt_tmrc (target, op0));
11985 return target;
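/* Editorial note: the SETWCX and GETWCX cases above expand directly
   to the tmcr and tmrc transfer patterns, so, assuming the usual
   iWMMXt numbering, __builtin_arm_getwcx (N) is simply a read of
   control register N into a general register.  */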
11987 case ARM_BUILTIN_WSHUFH:
11988 icode = CODE_FOR_iwmmxt_wshufh;
11989 arg0 = TREE_VALUE (arglist);
11990 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11991 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11992 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11993 tmode = insn_data[icode].operand[0].mode;
11994 mode1 = insn_data[icode].operand[1].mode;
11995 mode2 = insn_data[icode].operand[2].mode;
11997 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
11998 op0 = copy_to_mode_reg (mode1, op0);
11999 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12001 /* @@@ better error message */
12002 error ("mask must be an immediate");
12003 return const0_rtx;
12005 if (target == 0
12006 || GET_MODE (target) != tmode
12007 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12008 target = gen_reg_rtx (tmode);
12009 pat = GEN_FCN (icode) (target, op0, op1);
12010 if (! pat)
12011 return 0;
12012 emit_insn (pat);
12013 return target;
12015 case ARM_BUILTIN_WSADB:
12016 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12017 case ARM_BUILTIN_WSADH:
12018 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12019 case ARM_BUILTIN_WSADBZ:
12020 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12021 case ARM_BUILTIN_WSADHZ:
12022 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12024 /* Several three-argument builtins. */
12025 case ARM_BUILTIN_WMACS:
12026 case ARM_BUILTIN_WMACU:
12027 case ARM_BUILTIN_WALIGN:
12028 case ARM_BUILTIN_TMIA:
12029 case ARM_BUILTIN_TMIAPH:
12030 case ARM_BUILTIN_TMIATT:
12031 case ARM_BUILTIN_TMIATB:
12032 case ARM_BUILTIN_TMIABT:
12033 case ARM_BUILTIN_TMIABB:
12034 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12035 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12036 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12037 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12038 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12039 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12040 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12041 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12042 : CODE_FOR_iwmmxt_walign);
12043 arg0 = TREE_VALUE (arglist);
12044 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12045 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12046 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12047 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12048 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12049 tmode = insn_data[icode].operand[0].mode;
12050 mode0 = insn_data[icode].operand[1].mode;
12051 mode1 = insn_data[icode].operand[2].mode;
12052 mode2 = insn_data[icode].operand[3].mode;
12054 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12055 op0 = copy_to_mode_reg (mode0, op0);
12056 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12057 op1 = copy_to_mode_reg (mode1, op1);
12058 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12059 op2 = copy_to_mode_reg (mode2, op2);
12060 if (target == 0
12061 || GET_MODE (target) != tmode
12062 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12063 target = gen_reg_rtx (tmode);
12064 pat = GEN_FCN (icode) (target, op0, op1, op2);
12065 if (! pat)
12066 return 0;
12067 emit_insn (pat);
12068 return target;
12070 case ARM_BUILTIN_WZERO:
12071 target = gen_reg_rtx (DImode);
12072 emit_insn (gen_iwmmxt_clrdi (target));
12073 return target;
12075 default:
12076 break;
12079 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12080 if (d->code == (const enum arm_builtins) fcode)
12081 return arm_expand_binop_builtin (d->icode, arglist, target);
12083 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12084 if (d->code == (const enum arm_builtins) fcode)
12085 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12087 /* @@@ Should really do something sensible here. */
12088 return NULL_RTX;
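/* An illustrative use from C (hypothetical variables; the types
   follow the di_ftype_di_v4hi_v4hi signature registered above):

     typedef short v4hi __attribute__ ((vector_size (8)));
     v4hi a, b;
     long long acc = __builtin_arm_wmacs (0LL, a, b);

   DECL_FUNCTION_CODE of the callee selects the ARM_BUILTIN_WMACS
   case in the switch above.  */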
12091 /* Recursively search through all of the blocks in a function
12092 checking to see if any of the variables created in that
12093 function match the RTX called 'orig'. If they do then
12094 replace them with the RTX called 'new'. */
12095 static void
12096 replace_symbols_in_block (tree block, rtx orig, rtx new)
12098 for (; block; block = BLOCK_CHAIN (block))
12100 tree sym;
12102 if (!TREE_USED (block))
12103 continue;
12105 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12107 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12108 || DECL_IGNORED_P (sym)
12109 || TREE_CODE (sym) != VAR_DECL
12110 || DECL_EXTERNAL (sym)
12111 || !rtx_equal_p (DECL_RTL (sym), orig)
12113 continue;
12115 SET_DECL_RTL (sym, new);
12118 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12122 /* Return the number (counting from 0) of
12123 the least significant set bit in MASK. */
12125 inline static int
12126 number_of_first_bit_set (int mask)
12128 int bit;
12130 for (bit = 0;
12131 (mask & (1 << bit)) == 0;
12132 ++bit)
12133 continue;
12135 return bit;
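/* For example, number_of_first_bit_set (0x18) returns 3, bit 3 being
   the lowest bit set in 0x18.  The loop above never terminates for a
   zero mask; all callers guarantee MASK is nonzero.  */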
12138 /* Generate code to return from a thumb function.
12139 If 'reg_containing_return_addr' is -1, then the return address is
12140 actually on the stack, at the stack pointer. */
12141 static void
12142 thumb_exit (FILE *f, int reg_containing_return_addr)
12144 unsigned regs_available_for_popping;
12145 unsigned regs_to_pop;
12146 int pops_needed;
12147 unsigned available;
12148 unsigned required;
12149 int mode;
12150 int size;
12151 int restore_a4 = FALSE;
12153 /* Compute the registers we need to pop. */
12154 regs_to_pop = 0;
12155 pops_needed = 0;
12157 if (reg_containing_return_addr == -1)
12159 regs_to_pop |= 1 << LR_REGNUM;
12160 ++pops_needed;
12163 if (TARGET_BACKTRACE)
12165 /* Restore the (ARM) frame pointer and stack pointer. */
12166 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12167 pops_needed += 2;
12170 /* If there is nothing to pop then just emit the BX instruction and
12171 return. */
12172 if (pops_needed == 0)
12174 if (current_function_calls_eh_return)
12175 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12177 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12178 return;
12180 /* Otherwise if we are not supporting interworking and we have not created
12181 a backtrace structure and the function was not entered in ARM mode then
12182 just pop the return address straight into the PC. */
12183 else if (!TARGET_INTERWORK
12184 && !TARGET_BACKTRACE
12185 && !is_called_in_ARM_mode (current_function_decl)
12186 && !current_function_calls_eh_return)
12188 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12189 return;
12192 /* Find out how many of the (return) argument registers we can corrupt. */
12193 regs_available_for_popping = 0;
12195 /* If returning via __builtin_eh_return, the bottom three registers
12196 all contain information needed for the return. */
12197 if (current_function_calls_eh_return)
12198 size = 12;
12199 else
12201 /* We can deduce the registers used from the function's
12202 return value. This is more reliable than examining
12203 regs_ever_live[] because that will be set if the register is
12204 ever used in the function, not just if the register is used
12205 to hold a return value. */
12207 if (current_function_return_rtx != 0)
12208 mode = GET_MODE (current_function_return_rtx);
12209 else
12210 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12212 size = GET_MODE_SIZE (mode);
12214 if (size == 0)
12216 /* In a void function we can use any argument register.
12217 In a function that returns a structure on the stack
12218 we can use the second and third argument registers. */
12219 if (mode == VOIDmode)
12220 regs_available_for_popping =
12221 (1 << ARG_REGISTER (1))
12222 | (1 << ARG_REGISTER (2))
12223 | (1 << ARG_REGISTER (3));
12224 else
12225 regs_available_for_popping =
12226 (1 << ARG_REGISTER (2))
12227 | (1 << ARG_REGISTER (3));
12229 else if (size <= 4)
12230 regs_available_for_popping =
12231 (1 << ARG_REGISTER (2))
12232 | (1 << ARG_REGISTER (3));
12233 else if (size <= 8)
12234 regs_available_for_popping =
12235 (1 << ARG_REGISTER (3));
12238 /* Match registers to be popped with registers into which we pop them. */
12239 for (available = regs_available_for_popping,
12240 required = regs_to_pop;
12241 required != 0 && available != 0;
12242 available &= ~(available & - available),
12243 required &= ~(required & - required))
12244 -- pops_needed;
12246 /* If we have any popping registers left over, remove them. */
12247 if (available > 0)
12248 regs_available_for_popping &= ~available;
12250 /* Otherwise if we need another popping register we can use
12251 the fourth argument register. */
12252 else if (pops_needed)
12254 /* If we have not found any free argument registers and
12255 reg a4 contains the return address, we must move it. */
12256 if (regs_available_for_popping == 0
12257 && reg_containing_return_addr == LAST_ARG_REGNUM)
12259 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12260 reg_containing_return_addr = LR_REGNUM;
12262 else if (size > 12)
12264 /* Register a4 is being used to hold part of the return value,
12265 but we have dire need of a free, low register. */
12266 restore_a4 = TRUE;
12268 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12271 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12273 /* The fourth argument register is available. */
12274 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12276 --pops_needed;
12280 /* Pop as many registers as we can. */
12281 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12282 regs_available_for_popping);
12284 /* Process the registers we popped. */
12285 if (reg_containing_return_addr == -1)
12287 /* The return address was popped into the lowest numbered register. */
12288 regs_to_pop &= ~(1 << LR_REGNUM);
12290 reg_containing_return_addr =
12291 number_of_first_bit_set (regs_available_for_popping);
12293 /* Remove this register from the mask of available registers, so that
12294 the return address will not be corrupted by further pops. */
12295 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12298 /* If we popped other registers then handle them here. */
12299 if (regs_available_for_popping)
12301 int frame_pointer;
12303 /* Work out which register currently contains the frame pointer. */
12304 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12306 /* Move it into the correct place. */
12307 asm_fprintf (f, "\tmov\t%r, %r\n",
12308 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12310 /* (Temporarily) remove it from the mask of popped registers. */
12311 regs_available_for_popping &= ~(1 << frame_pointer);
12312 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12314 if (regs_available_for_popping)
12316 int stack_pointer;
12318 /* We popped the stack pointer as well,
12319 find the register that contains it. */
12320 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12322 /* Move it into the stack register. */
12323 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12325 /* At this point we have popped all necessary registers, so
12326 do not worry about restoring regs_available_for_popping
12327 to its correct value:
12329 assert (pops_needed == 0)
12330 assert (regs_available_for_popping == (1 << frame_pointer))
12331 assert (regs_to_pop == (1 << STACK_POINTER)) */
12333 else
12335 /* Since we have just moved the popped value into the frame
12336 pointer, the popping register is available for reuse, and
12337 we know that we still have the stack pointer left to pop. */
12338 regs_available_for_popping |= (1 << frame_pointer);
12342 /* If we still have registers left on the stack, but we no longer have
12343 any registers into which we can pop them, then we must move the return
12344 address into the link register and make available the register that
12345 contained it. */
12346 if (regs_available_for_popping == 0 && pops_needed > 0)
12348 regs_available_for_popping |= 1 << reg_containing_return_addr;
12350 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12351 reg_containing_return_addr);
12353 reg_containing_return_addr = LR_REGNUM;
12356 /* If we have registers left on the stack then pop some more.
12357 We know that at most we will want to pop FP and SP. */
12358 if (pops_needed > 0)
12360 int popped_into;
12361 int move_to;
12363 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12364 regs_available_for_popping);
12366 /* We have popped either FP or SP.
12367 Move whichever one it is into the correct register. */
12368 popped_into = number_of_first_bit_set (regs_available_for_popping);
12369 move_to = number_of_first_bit_set (regs_to_pop);
12371 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12373 regs_to_pop &= ~(1 << move_to);
12375 --pops_needed;
12378 /* If we still have not popped everything then we must have only
12379 had one register available to us and we are now popping the SP. */
12380 if (pops_needed > 0)
12382 int popped_into;
12384 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12385 regs_available_for_popping);
12387 popped_into = number_of_first_bit_set (regs_available_for_popping);
12389 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12391 /* assert (regs_to_pop == (1 << STACK_POINTER))
12392 assert (pops_needed == 1) */
12396 /* If necessary restore the a4 register. */
12397 if (restore_a4)
12399 if (reg_containing_return_addr != LR_REGNUM)
12401 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12402 reg_containing_return_addr = LR_REGNUM;
12405 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12408 if (current_function_calls_eh_return)
12409 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12411 /* Return to caller. */
12412 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
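/* A hand-written illustration (not compiler output) of the code
   above: for a void function compiled for interworking whose return
   address had to be pushed, the emitted epilogue is

	pop	{r0}
	bx	r0

   rather than the plain "pop {pc}" of the simple case, because the
   return address is popped into the lowest available argument
   register.  */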
12415 /* Emit code to push or pop registers to or from the stack. F is the
12416 assembly file. MASK is the registers to push or pop. PUSH is
12417 nonzero if we should push, and zero if we should pop. For debugging
12418 output, if pushing, adjust CFA_OFFSET by the amount of space added
12419 to the stack. REAL_REGS should have the same number of bits set as
12420 MASK, and will be used instead (in the same order) to describe which
12421 registers were saved - this is used to mark the save slots when we
12422 push high registers after moving them to low registers. */
12423 static void
12424 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12426 int regno;
12427 int lo_mask = mask & 0xFF;
12428 int pushed_words = 0;
12430 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12432 /* Special case. Do not generate a POP PC statement here; do it in
12433 thumb_exit (). */
12434 thumb_exit (f, -1);
12435 return;
12438 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12440 /* Look at the low registers first. */
12441 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12443 if (lo_mask & 1)
12445 asm_fprintf (f, "%r", regno);
12447 if ((lo_mask & ~1) != 0)
12448 fprintf (f, ", ");
12450 pushed_words++;
12454 if (push && (mask & (1 << LR_REGNUM)))
12456 /* Catch pushing the LR. */
12457 if (mask & 0xFF)
12458 fprintf (f, ", ");
12460 asm_fprintf (f, "%r", LR_REGNUM);
12462 pushed_words++;
12464 else if (!push && (mask & (1 << PC_REGNUM)))
12466 /* Catch popping the PC. */
12467 if (TARGET_INTERWORK || TARGET_BACKTRACE
12468 || current_function_calls_eh_return)
12470 /* The PC is never popped directly; instead
12471 it is popped into r3 and then BX is used. */
12472 fprintf (f, "}\n");
12474 thumb_exit (f, -1);
12476 return;
12478 else
12480 if (mask & 0xFF)
12481 fprintf (f, ", ");
12483 asm_fprintf (f, "%r", PC_REGNUM);
12487 fprintf (f, "}\n");
12489 if (push && pushed_words && dwarf2out_do_frame ())
12491 char *l = dwarf2out_cfi_label ();
12492 int pushed_mask = real_regs;
12494 *cfa_offset += pushed_words * 4;
12495 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12497 pushed_words = 0;
12498 pushed_mask = real_regs;
12499 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12501 if (pushed_mask & 1)
12502 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
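/* Illustration with a hypothetical mask: thumb_pushpop (f, 0x4070, 1,
   &cfa_offset, 0x4070) prints

	push	{r4, r5, r6, lr}

   since bit 14 selects LR on a push; on a pop, bit 15 selects the PC
   and is diverted through thumb_exit above when interworking, a
   backtrace structure or an eh_return is involved.  */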
12507 void
12508 thumb_final_prescan_insn (rtx insn)
12510 if (flag_print_asm_name)
12511 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12512 INSN_ADDRESSES (INSN_UID (insn)));
12515 int
12516 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12518 unsigned HOST_WIDE_INT mask = 0xff;
12519 int i;
12521 if (val == 0) /* XXX */
12522 return 0;
12524 for (i = 0; i < 25; i++)
12525 if ((val & (mask << i)) == val)
12526 return 1;
12528 return 0;
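/* Examples: 0xFF0 (0xff << 4) satisfies the test above, while 0x101
   does not, since its two set bits do not fit inside any single
   eight-bit window.  */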
12531 /* Returns nonzero if the current function contains,
12532 or might contain, a far jump. */
12533 static int
12534 thumb_far_jump_used_p (void)
12536 rtx insn;
12538 /* This test is only important for leaf functions. */
12539 /* assert (!leaf_function_p ()); */
12541 /* If we have already decided that far jumps may be used,
12542 do not bother checking again, and always return true even if
12543 it turns out that they are not being used. Once we have made
12544 the decision that far jumps are present (and that hence the link
12545 register will be pushed onto the stack) we cannot go back on it. */
12546 if (cfun->machine->far_jump_used)
12547 return 1;
12549 /* If this function is not being called from the prologue/epilogue
12550 generation code then it must be being called from the
12551 INITIAL_ELIMINATION_OFFSET macro. */
12552 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12554 /* In this case we know that we are being asked about the elimination
12555 of the arg pointer register. If that register is not being used,
12556 then there are no arguments on the stack, and we do not have to
12557 worry that a far jump might force the prologue to push the link
12558 register, changing the stack offsets. In this case we can just
12559 return false, since the presence of far jumps in the function will
12560 not affect stack offsets.
12562 If the arg pointer is live (or if it was live, but has now been
12563 eliminated and so set to dead) then we do have to test to see if
12564 the function might contain a far jump. This test can lead to some
12565 false positives, since before reload is completed, the length of
12566 branch instructions is not known, so gcc defaults to returning their
12567 longest length, which in turn sets the far jump attribute to true.
12569 A false positive will not result in bad code being generated, but it
12570 will result in a needless push and pop of the link register. We
12571 hope that this does not occur too often.
12573 If we need doubleword stack alignment this could affect the other
12574 elimination offsets so we can't risk getting it wrong. */
12575 if (regs_ever_live [ARG_POINTER_REGNUM])
12576 cfun->machine->arg_pointer_live = 1;
12577 else if (!cfun->machine->arg_pointer_live)
12578 return 0;
12581 /* Check to see if the function contains a branch
12582 insn with the far jump attribute set. */
12583 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12585 if (GET_CODE (insn) == JUMP_INSN
12586 /* Ignore tablejump patterns. */
12587 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12588 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12589 && get_attr_far_jump (insn) == FAR_JUMP_YES
12592 /* Record the fact that we have decided that
12593 the function does use far jumps. */
12594 cfun->machine->far_jump_used = 1;
12595 return 1;
12599 return 0;
12602 /* Return nonzero if FUNC must be entered in ARM mode. */
12603 int
12604 is_called_in_ARM_mode (tree func)
12606 if (TREE_CODE (func) != FUNCTION_DECL)
12607 abort ();
12609 /* Ignore the problem of functions whose address is taken. */
12610 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12611 return TRUE;
12613 #ifdef ARM_PE
12614 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12615 #else
12616 return FALSE;
12617 #endif
12620 /* The bits which aren't usefully expanded as rtl. */
12621 const char *
12622 thumb_unexpanded_epilogue (void)
12624 int regno;
12625 int live_regs_mask = 0;
12626 int high_regs_pushed = 0;
12627 int had_to_push_lr;
12628 int size;
12629 int mode;
12631 if (return_used_this_function)
12632 return "";
12634 if (IS_NAKED (arm_current_func_type ()))
12635 return "";
12637 live_regs_mask = thumb_compute_save_reg_mask ();
12638 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
12640 /* We can deduce the registers used from the function's return value.
12641 This is more reliable than examining regs_ever_live[] because that
12642 will be set if the register is ever used in the function, not just if
12643 the register is used to hold a return value. */
12645 if (current_function_return_rtx != 0)
12646 mode = GET_MODE (current_function_return_rtx);
12647 else
12648 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12650 size = GET_MODE_SIZE (mode);
12652 /* The prologue may have pushed some high registers to use as
12653 work registers, e.g. the testsuite file:
12654 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12655 compiles to produce:
12656 push {r4, r5, r6, r7, lr}
12657 mov r7, r9
12658 mov r6, r8
12659 push {r6, r7}
12660 as part of the prologue. We have to undo that pushing here. */
12662 if (high_regs_pushed)
12664 int mask = live_regs_mask & 0xff;
12665 int next_hi_reg;
12667 /* The available low registers depend on the size of the value we are
12668 returning. */
12669 if (size <= 12)
12670 mask |= 1 << 3;
12671 if (size <= 8)
12672 mask |= 1 << 2;
12674 if (mask == 0)
12675 /* Oh dear! We have no low registers into which we can pop
12676 high registers! */
12677 internal_error
12678 ("no low registers available for popping high registers");
12680 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12681 if (live_regs_mask & (1 << next_hi_reg))
12682 break;
12684 while (high_regs_pushed)
12686 /* Find lo register(s) into which the high register(s) can
12687 be popped. */
12688 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12690 if (mask & (1 << regno))
12691 high_regs_pushed--;
12692 if (high_regs_pushed == 0)
12693 break;
12696 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12698 /* Pop the values into the low register(s). */
12699 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12701 /* Move the value(s) into the high registers. */
12702 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12704 if (mask & (1 << regno))
12706 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12707 regno);
12709 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12710 if (live_regs_mask & (1 << next_hi_reg))
12711 break;
12715 live_regs_mask &= ~0x0f00;
12718 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
12719 live_regs_mask &= 0xff;
12721 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12723 /* Pop the return address into the PC. */
12724 if (had_to_push_lr)
12725 live_regs_mask |= 1 << PC_REGNUM;
12727 /* Either no argument registers were pushed or a backtrace
12728 structure was created which includes an adjusted stack
12729 pointer, so just pop everything. */
12730 if (live_regs_mask)
12731 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12732 live_regs_mask);
12734 /* We have either just popped the return address into the
12735 PC or it was kept in LR for the entire function. */
12736 if (!had_to_push_lr)
12737 thumb_exit (asm_out_file, LR_REGNUM);
12739 else
12741 /* Pop everything but the return address. */
12742 if (live_regs_mask)
12743 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12744 live_regs_mask);
12746 if (had_to_push_lr)
12748 if (size > 12)
12750 /* We have no free low regs, so save one. */
12751 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
12752 LAST_ARG_REGNUM);
12755 /* Get the return address into a temporary register. */
12756 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12757 1 << LAST_ARG_REGNUM);
12759 if (size > 12)
12761 /* Move the return address to lr. */
12762 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
12763 LAST_ARG_REGNUM);
12764 /* Restore the low register. */
12765 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
12766 IP_REGNUM);
12767 regno = LR_REGNUM;
12769 else
12770 regno = LAST_ARG_REGNUM;
12772 else
12773 regno = LR_REGNUM;
12775 /* Remove the argument registers that were pushed onto the stack. */
12776 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12777 SP_REGNUM, SP_REGNUM,
12778 current_function_pretend_args_size);
12780 thumb_exit (asm_out_file, regno);
12783 return "";
12786 /* Functions to save and restore machine-specific function data. */
12787 static struct machine_function *
12788 arm_init_machine_status (void)
12790 struct machine_function *machine;
12791 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12793 #if ARM_FT_UNKNOWN != 0
12794 machine->func_type = ARM_FT_UNKNOWN;
12795 #endif
12796 return machine;
12799 /* Return an RTX indicating where the return address to the
12800 calling function can be found. */
12801 rtx
12802 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12804 if (count != 0)
12805 return NULL_RTX;
12807 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12810 /* Do anything needed before RTL is emitted for each function. */
12811 void
12812 arm_init_expanders (void)
12814 /* Arrange to initialize and mark the machine per-function status. */
12815 init_machine_status = arm_init_machine_status;
12817 /* This is to stop the combine pass optimizing away the alignment
12818 adjustment of va_arg. */
12819 /* ??? It is claimed that this should not be necessary. */
12820 if (cfun)
12821 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
12825 /* Like arm_compute_initial_elimination_offset. Simpler because
12826 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
12828 HOST_WIDE_INT
12829 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
12831 arm_stack_offsets *offsets;
12833 offsets = arm_get_frame_offsets ();
12835 switch (from)
12837 case ARG_POINTER_REGNUM:
12838 switch (to)
12840 case STACK_POINTER_REGNUM:
12841 return offsets->outgoing_args - offsets->saved_args;
12843 case FRAME_POINTER_REGNUM:
12844 return offsets->soft_frame - offsets->saved_args;
12846 case THUMB_HARD_FRAME_POINTER_REGNUM:
12847 case ARM_HARD_FRAME_POINTER_REGNUM:
12848 return offsets->saved_regs - offsets->saved_args;
12850 default:
12851 abort();
12853 break;
12855 case FRAME_POINTER_REGNUM:
12856 switch (to)
12858 case STACK_POINTER_REGNUM:
12859 return offsets->outgoing_args - offsets->soft_frame;
12861 case THUMB_HARD_FRAME_POINTER_REGNUM:
12862 case ARM_HARD_FRAME_POINTER_REGNUM:
12863 return offsets->saved_regs - offsets->soft_frame;
12865 default:
12866 abort();
12868 break;
12870 default:
12871 abort ();
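/* A worked example with hypothetical offsets: if saved_args is 0,
   saved_regs and soft_frame are both 16, and outgoing_args is 24,
   then eliminating the arg pointer to the stack pointer yields
   24 - 0 = 24, while eliminating the frame pointer to the stack
   pointer yields 24 - 16 = 8.  */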
12876 /* Generate the rest of a function's prologue. */
12877 void
12878 thumb_expand_prologue (void)
12880 rtx insn, dwarf;
12882 HOST_WIDE_INT amount;
12883 arm_stack_offsets *offsets;
12884 unsigned long func_type;
12885 int regno;
12886 unsigned long live_regs_mask;
12888 func_type = arm_current_func_type ();
12890 /* Naked functions don't have prologues. */
12891 if (IS_NAKED (func_type))
12892 return;
12894 if (IS_INTERRUPT (func_type))
12896 error ("interrupt Service Routines cannot be coded in Thumb mode");
12897 return;
12900 /* Load the pic register before setting the frame pointer, so we can use r7
12901 as a temporary work register. */
12902 if (flag_pic)
12903 arm_load_pic_register ();
12905 offsets = arm_get_frame_offsets ();
12907 if (frame_pointer_needed)
12909 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
12910 stack_pointer_rtx));
12911 RTX_FRAME_RELATED_P (insn) = 1;
12914 live_regs_mask = thumb_compute_save_reg_mask ();
12915 amount = offsets->outgoing_args - offsets->saved_regs;
12916 if (amount)
12918 if (amount < 512)
12920 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12921 GEN_INT (- amount)));
12922 RTX_FRAME_RELATED_P (insn) = 1;
12924 else
12926 rtx reg;
12928 /* The stack decrement is too big for an immediate value in a single
12929 insn. In theory we could issue multiple subtracts, but after
12930 three of them it becomes more space efficient to place the full
12931 value in the constant pool and load into a register. (Also the
12932 ARM debugger really likes to see only one stack decrement per
12933 function). So instead we look for a scratch register into which
12934 we can load the decrement, and then we subtract this from the
12935 stack pointer. Unfortunately on the thumb the only available
12936 scratch registers are the argument registers, and we cannot use
12937 these as they may hold arguments to the function. Instead we
12938 attempt to locate a call preserved register which is used by this
12939 function. If we can find one, then we know that it will have
12940 been pushed at the start of the prologue and so we can corrupt
12941 it now. */
12942 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
12943 if (live_regs_mask & (1 << regno)
12944 && !(frame_pointer_needed
12945 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
12946 break;
12948 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
12950 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
12952 /* Choose an arbitrary, non-argument low register. */
12953 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
12955 /* Save it by copying it into a high, scratch register. */
12956 emit_insn (gen_movsi (spare, reg));
12957 /* Add a USE to stop propagate_one_insn() from barfing. */
12958 emit_insn (gen_prologue_use (spare));
12960 /* Decrement the stack. */
12961 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12962 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12963 stack_pointer_rtx, reg));
12964 RTX_FRAME_RELATED_P (insn) = 1;
12965 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12966 plus_constant (stack_pointer_rtx,
12967 -amount));
12968 RTX_FRAME_RELATED_P (dwarf) = 1;
12969 REG_NOTES (insn)
12970 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12971 REG_NOTES (insn));
12973 /* Restore the low register's original value. */
12974 emit_insn (gen_movsi (reg, spare));
12976 /* Emit a USE of the restored scratch register, so that flow
12977 analysis will not consider the restore redundant. The
12978 register won't be used again in this function and isn't
12979 restored by the epilogue. */
12980 emit_insn (gen_prologue_use (reg));
12982 else
12984 reg = gen_rtx_REG (SImode, regno);
12986 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12988 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12989 stack_pointer_rtx, reg));
12990 RTX_FRAME_RELATED_P (insn) = 1;
12991 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12992 plus_constant (stack_pointer_rtx,
12993 -amount));
12994 RTX_FRAME_RELATED_P (dwarf) = 1;
12995 REG_NOTES (insn)
12996 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12997 REG_NOTES (insn));
13000 /* If the frame pointer is needed, emit a special barrier that
13001 will prevent the scheduler from moving stores to the frame
13002 before the stack adjustment. */
13003 if (frame_pointer_needed)
13004 emit_insn (gen_stack_tie (stack_pointer_rtx,
13005 hard_frame_pointer_rtx));
13008 if (current_function_profile || TARGET_NO_SCHED_PRO)
13009 emit_insn (gen_blockage ());
13011 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13012 if (live_regs_mask & 0xff)
13013 cfun->machine->lr_save_eliminated = 0;
13015 /* If the link register is being kept alive, with the return address in it,
13016 then make sure that it does not get reused by the ce2 pass. */
13017 if (cfun->machine->lr_save_eliminated)
13018 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
13022 void
13023 thumb_expand_epilogue (void)
13025 HOST_WIDE_INT amount;
13026 arm_stack_offsets *offsets;
13027 int regno;
13029 /* Naked functions don't have epilogues. */
13030 if (IS_NAKED (arm_current_func_type ()))
13031 return;
13033 offsets = arm_get_frame_offsets ();
13034 amount = offsets->outgoing_args - offsets->saved_regs;
13036 if (frame_pointer_needed)
13037 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13038 else if (amount)
13040 if (amount < 512)
13041 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13042 GEN_INT (amount)));
13043 else
13045 /* r3 is always free in the epilogue. */
13046 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13048 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13049 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13053 /* Emit a USE (stack_pointer_rtx), so that
13054 the stack adjustment will not be deleted. */
13055 emit_insn (gen_prologue_use (stack_pointer_rtx));
13057 if (current_function_profile || TARGET_NO_SCHED_PRO)
13058 emit_insn (gen_blockage ());
13060 /* Emit a clobber for each insn that will be restored in the epilogue,
13061 so that flow2 will get register lifetimes correct. */
13062 for (regno = 0; regno < 13; regno++)
13063 if (regs_ever_live[regno] && !call_used_regs[regno])
13064 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13066 if (! regs_ever_live[LR_REGNUM])
13067 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13070 static void
13071 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13073 int live_regs_mask = 0;
13074 int l_mask;
13075 int high_regs_pushed = 0;
13076 int cfa_offset = 0;
13077 int regno;
13079 if (IS_NAKED (arm_current_func_type ()))
13080 return;
13082 if (is_called_in_ARM_mode (current_function_decl))
13084 const char * name;
13086 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13087 abort ();
13088 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13089 abort ();
13090 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13092 /* Generate code sequence to switch us into Thumb mode. */
13093 /* The .code 32 directive has already been emitted by
13094 ASM_DECLARE_FUNCTION_NAME. */
13095 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13096 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13098 /* Generate a label, so that the debugger will notice the
13099 change in instruction sets. This label is also used by
13100 the assembler to bypass the ARM code when this function
13101 is called from a Thumb encoded function elsewhere in the
13102 same file. Hence the definition of STUB_NAME here must
13103 agree with the definition in gas/config/tc-arm.c. */
13105 #define STUB_NAME ".real_start_of"
13107 fprintf (f, "\t.code\t16\n");
13108 #ifdef ARM_PE
13109 if (arm_dllexport_name_p (name))
13110 name = arm_strip_name_encoding (name);
13111 #endif
13112 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13113 fprintf (f, "\t.thumb_func\n");
13114 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13117 if (current_function_pretend_args_size)
13119 if (cfun->machine->uses_anonymous_args)
13121 int num_pushes;
13123 fprintf (f, "\tpush\t{");
13125 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13127 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13128 regno <= LAST_ARG_REGNUM;
13129 regno++)
13130 asm_fprintf (f, "%r%s", regno,
13131 regno == LAST_ARG_REGNUM ? "" : ", ");
13133 fprintf (f, "}\n");
13135 else
13136 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13137 SP_REGNUM, SP_REGNUM,
13138 current_function_pretend_args_size);
13140 /* We don't need to record the stores for unwinding (would it
13141 help the debugger any if we did?), but record the change in
13142 the stack pointer. */
13143 if (dwarf2out_do_frame ())
13145 char *l = dwarf2out_cfi_label ();
13146 cfa_offset = cfa_offset + current_function_pretend_args_size;
13147 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13151 live_regs_mask = thumb_compute_save_reg_mask ();
13152 /* Just low regs and lr. */
13153 l_mask = live_regs_mask & 0x40ff;
13155 if (TARGET_BACKTRACE)
13157 int offset;
13158 int work_register;
13160 /* We have been asked to create a stack backtrace structure.
13161 The code looks like this:
13163 0 .align 2
13164 0 func:
13165 0 sub SP, #16 Reserve space for 4 registers.
13166 2 push {R7} Push low registers.
13167 4 add R7, SP, #20 Get the stack pointer before the push.
13168 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13169 8 mov R7, PC Get hold of the start of this code plus 12.
13170 10 str R7, [SP, #16] Store it.
13171 12 mov R7, FP Get hold of the current frame pointer.
13172 14 str R7, [SP, #4] Store it.
13173 16 mov R7, LR Get hold of the current return address.
13174 18 str R7, [SP, #12] Store it.
13175 20 add R7, SP, #16 Point at the start of the backtrace structure.
13176 22 mov FP, R7 Put this value into the frame pointer. */
13178 work_register = thumb_find_work_register (live_regs_mask);
13180 asm_fprintf
13181 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13182 SP_REGNUM, SP_REGNUM);
13184 if (dwarf2out_do_frame ())
13186 char *l = dwarf2out_cfi_label ();
13187 cfa_offset = cfa_offset + 16;
13188 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13191 if (l_mask)
13193 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13194 offset = bit_count (l_mask);
13196 else
13197 offset = 0;
13199 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13200 offset + 16 + current_function_pretend_args_size);
13202 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13203 offset + 4);
13205 /* Make sure that the instruction fetching the PC is in the right place
13206 to calculate "start of backtrace creation code + 12". */
13207 if (l_mask)
13209 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13210 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13211 offset + 12);
13212 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13213 ARM_HARD_FRAME_POINTER_REGNUM);
13214 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13215 offset);
13217 else
13219 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13220 ARM_HARD_FRAME_POINTER_REGNUM);
13221 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13222 offset);
13223 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13224 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13225 offset + 12);
13228 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13229 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13230 offset + 8);
13231 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13232 offset + 12);
13233 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13234 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13236 else if (l_mask)
13237 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13239 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13241 if (high_regs_pushed)
13243 int pushable_regs = 0;
13244 int next_hi_reg;
13246 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13247 if (live_regs_mask & (1 << next_hi_reg))
13248 break;
13250 pushable_regs = l_mask & 0xff;
13252 if (pushable_regs == 0)
13253 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13255 while (high_regs_pushed > 0)
13257 int real_regs_mask = 0;
13259 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13261 if (pushable_regs & (1 << regno))
13263 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13265 high_regs_pushed--;
13266 real_regs_mask |= (1 << next_hi_reg);
13268 if (high_regs_pushed)
13270 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13271 next_hi_reg--)
13272 if (live_regs_mask & (1 << next_hi_reg))
13273 break;
13275 else
13277 pushable_regs &= ~((1 << regno) - 1);
13278 break;
13283 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
13288 /* Handle the case of a double word load into a low register from
13289 a computed memory address. The computed address may involve a
13290 register which is overwritten by the load. */
13291 const char *
13292 thumb_load_double_from_address (rtx *operands)
13294 rtx addr;
13295 rtx base;
13296 rtx offset;
13297 rtx arg1;
13298 rtx arg2;
13300 if (GET_CODE (operands[0]) != REG)
13301 abort ();
13303 if (GET_CODE (operands[1]) != MEM)
13304 abort ();
13306 /* Get the memory address. */
13307 addr = XEXP (operands[1], 0);
13309 /* Work out how the memory address is computed. */
13310 switch (GET_CODE (addr))
13312 case REG:
13313 operands[2] = gen_rtx_MEM (SImode,
13314 plus_constant (XEXP (operands[1], 0), 4));
13316 if (REGNO (operands[0]) == REGNO (addr))
13318 output_asm_insn ("ldr\t%H0, %2", operands);
13319 output_asm_insn ("ldr\t%0, %1", operands);
13321 else
13323 output_asm_insn ("ldr\t%0, %1", operands);
13324 output_asm_insn ("ldr\t%H0, %2", operands);
13326 break;
13328 case CONST:
13329 /* Compute <address> + 4 for the high order load. */
13330 operands[2] = gen_rtx_MEM (SImode,
13331 plus_constant (XEXP (operands[1], 0), 4));
13333 output_asm_insn ("ldr\t%0, %1", operands);
13334 output_asm_insn ("ldr\t%H0, %2", operands);
13335 break;
13337 case PLUS:
13338 arg1 = XEXP (addr, 0);
13339 arg2 = XEXP (addr, 1);
13341 if (CONSTANT_P (arg1))
13342 base = arg2, offset = arg1;
13343 else
13344 base = arg1, offset = arg2;
13346 if (GET_CODE (base) != REG)
13347 abort ();
13349 /* Catch the case of <address> = <reg> + <reg> */
13350 if (GET_CODE (offset) == REG)
13352 int reg_offset = REGNO (offset);
13353 int reg_base = REGNO (base);
13354 int reg_dest = REGNO (operands[0]);
13356 /* Add the base and offset registers together into the
13357 higher destination register. */
13358 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13359 reg_dest + 1, reg_base, reg_offset);
13361 /* Load the lower destination register from the address in
13362 the higher destination register. */
13363 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13364 reg_dest, reg_dest + 1);
13366 /* Load the higher destination register from its own address
13367 plus 4. */
13368 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13369 reg_dest + 1, reg_dest + 1);
13371 else
13373 /* Compute <address> + 4 for the high order load. */
13374 operands[2] = gen_rtx_MEM (SImode,
13375 plus_constant (XEXP (operands[1], 0), 4));
13377 /* If the computed address is held in the low order register
13378 then load the high order register first, otherwise always
13379 load the low order register first. */
13380 if (REGNO (operands[0]) == REGNO (base))
13382 output_asm_insn ("ldr\t%H0, %2", operands);
13383 output_asm_insn ("ldr\t%0, %1", operands);
13385 else
13387 output_asm_insn ("ldr\t%0, %1", operands);
13388 output_asm_insn ("ldr\t%H0, %2", operands);
13391 break;
13393 case LABEL_REF:
13394 /* With no registers to worry about we can just load the value
13395 directly. */
13396 operands[2] = gen_rtx_MEM (SImode,
13397 plus_constant (XEXP (operands[1], 0), 4));
13399 output_asm_insn ("ldr\t%H0, %2", operands);
13400 output_asm_insn ("ldr\t%0, %1", operands);
13401 break;
13403 default:
13404 abort ();
13405 break;
13408 return "";
13411 const char *
13412 thumb_output_move_mem_multiple (int n, rtx *operands)
13414 rtx tmp;
13416 switch (n)
13418 case 2:
13419 if (REGNO (operands[4]) > REGNO (operands[5]))
13421 tmp = operands[4];
13422 operands[4] = operands[5];
13423 operands[5] = tmp;
13425 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13426 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13427 break;
13429 case 3:
13430 if (REGNO (operands[4]) > REGNO (operands[5]))
13432 tmp = operands[4];
13433 operands[4] = operands[5];
13434 operands[5] = tmp;
13436 if (REGNO (operands[5]) > REGNO (operands[6]))
13438 tmp = operands[5];
13439 operands[5] = operands[6];
13440 operands[6] = tmp;
13442 if (REGNO (operands[4]) > REGNO (operands[5]))
13444 tmp = operands[4];
13445 operands[4] = operands[5];
13446 operands[5] = tmp;
13449 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13450 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13451 break;
13453 default:
13454 abort ();
13457 return "";
13460 /* Routines for generating rtl. */
13461 void
13462 thumb_expand_movmemqi (rtx *operands)
13464 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13465 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13466 HOST_WIDE_INT len = INTVAL (operands[2]);
13467 HOST_WIDE_INT offset = 0;
13469 while (len >= 12)
13471 emit_insn (gen_movmem12b (out, in, out, in));
13472 len -= 12;
13475 if (len >= 8)
13477 emit_insn (gen_movmem8b (out, in, out, in));
13478 len -= 8;
13481 if (len >= 4)
13483 rtx reg = gen_reg_rtx (SImode);
13484 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13485 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13486 len -= 4;
13487 offset += 4;
13490 if (len >= 2)
13492 rtx reg = gen_reg_rtx (HImode);
13493 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13494 plus_constant (in, offset))));
13495 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13496 reg));
13497 len -= 2;
13498 offset += 2;
13501 if (len)
13503 rtx reg = gen_reg_rtx (QImode);
13504 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13505 plus_constant (in, offset))));
13506 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13507 reg));
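/* For example, a 7 byte copy is expanded by the code above into one
   word move, one halfword move and one byte move, while a 27 byte
   copy becomes two 12 byte block moves followed by a halfword and a
   byte move for the remaining three bytes.  */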
13511 void
13512 thumb_reload_out_hi (rtx *operands)
13514 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13517 /* Handle reading a half-word from memory during reload. */
13518 void
13519 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13521 abort ();
13524 /* Return the length of a function name prefix
13525 that starts with the character 'c'. */
13526 static int
13527 arm_get_strip_length (int c)
13529 switch (c)
13531 ARM_NAME_ENCODING_LENGTHS
13532 default: return 0;
13536 /* Return a pointer to a function's name with any
13537 and all prefix encodings stripped from it. */
13538 const char *
13539 arm_strip_name_encoding (const char *name)
13541 int skip;
13543 while ((skip = arm_get_strip_length (* name)))
13544 name += skip;
13546 return name;
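/* For instance, on a target whose ARM_NAME_ENCODING_LENGTHS lists '*'
   as a one-character prefix, arm_strip_name_encoding ("*foo") returns
   "foo"; stacked prefixes are stripped one at a time by the loop
   above.  */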
13549 /* If there is a '*' anywhere in the name's prefix, then
13550 emit the stripped name verbatim, otherwise prepend an
13551 underscore if leading underscores are being used. */
13552 void
13553 arm_asm_output_labelref (FILE *stream, const char *name)
13555 int skip;
13556 int verbatim = 0;
13558 while ((skip = arm_get_strip_length (* name)))
13560 verbatim |= (*name == '*');
13561 name += skip;
13564 if (verbatim)
13565 fputs (name, stream);
13566 else
13567 asm_fprintf (stream, "%U%s", name);
13570 rtx aof_pic_label;
13572 #ifdef AOF_ASSEMBLER
13573 /* Special functions only needed when producing AOF syntax assembler. */
13575 struct pic_chain
13577 struct pic_chain * next;
13578 const char * symname;
13581 static struct pic_chain * aof_pic_chain = NULL;
13583 rtx
13584 aof_pic_entry (rtx x)
13586 struct pic_chain ** chainp;
13587 int offset;
13589 if (aof_pic_label == NULL_RTX)
13591 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
13594 for (offset = 0, chainp = &aof_pic_chain; *chainp;
13595 offset += 4, chainp = &(*chainp)->next)
13596 if ((*chainp)->symname == XSTR (x, 0))
13597 return plus_constant (aof_pic_label, offset);
13599 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
13600 (*chainp)->next = NULL;
13601 (*chainp)->symname = XSTR (x, 0);
13602 return plus_constant (aof_pic_label, offset);
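/* Illustration: the first symbol entered into the chain is addressed
   as x$adcons + 0, the second as x$adcons + 4, and so on; a repeated
   request for the same SYMBOL_REF returns the offset already
   assigned to it.  */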
13605 void
13606 aof_dump_pic_table (FILE *f)
13608 struct pic_chain * chain;
13610 if (aof_pic_chain == NULL)
13611 return;
13613 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13614 PIC_OFFSET_TABLE_REGNUM,
13615 PIC_OFFSET_TABLE_REGNUM);
13616 fputs ("|x$adcons|\n", f);
13618 for (chain = aof_pic_chain; chain; chain = chain->next)
13620 fputs ("\tDCD\t", f);
13621 assemble_name (f, chain->symname);
13622 fputs ("\n", f);
13626 int arm_text_section_count = 1;
13628 char *
13629 aof_text_section (void)
13631 static char buf[100];
13632 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13633 arm_text_section_count++);
13634 if (flag_pic)
13635 strcat (buf, ", PIC, REENTRANT");
13636 return buf;
13639 static int arm_data_section_count = 1;
13641 char *
13642 aof_data_section (void)
13644 static char buf[100];
13645 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13646 return buf;
13649 /* The AOF assembler is religiously strict about declarations of
13650 imported and exported symbols, so that it is impossible to declare
13651 a function as imported near the beginning of the file, and then to
13652 export it later on. It is, however, possible to delay the decision
13653 until all the functions in the file have been compiled. To get
13654 around this, we maintain a list of the imports and exports, and
13655 delete from it any that are subsequently defined. At the end of
13656 compilation we spit the remainder of the list out before the END
13657 directive. */
13659 struct import
13661 struct import * next;
13662 const char * name;
13665 static struct import * imports_list = NULL;
13667 void
13668 aof_add_import (const char *name)
13670 struct import * new;
13672 for (new = imports_list; new; new = new->next)
13673 if (new->name == name)
13674 return;
13676 new = (struct import *) xmalloc (sizeof (struct import));
13677 new->next = imports_list;
13678 imports_list = new;
13679 new->name = name;
13682 void
13683 aof_delete_import (const char *name)
13685 struct import ** old;
13687 for (old = &imports_list; *old; old = & (*old)->next)
13689 if ((*old)->name == name)
13691 *old = (*old)->next;
13692 return;
13697 int arm_main_function = 0;
13699 static void
13700 aof_dump_imports (FILE *f)
13702 /* The AOF assembler needs this to cause the startup code to be extracted
13703 from the library. Bringing in __main causes the whole thing to work
13704 automagically. */
13705 if (arm_main_function)
13707 text_section ();
13708 fputs ("\tIMPORT __main\n", f);
13709 fputs ("\tDCD __main\n", f);
13712 /* Now dump the remaining imports. */
13713 while (imports_list)
13715 fprintf (f, "\tIMPORT\t");
13716 assemble_name (f, imports_list->name);
13717 fputc ('\n', f);
13718 imports_list = imports_list->next;
13722 static void
13723 aof_globalize_label (FILE *stream, const char *name)
13725 default_globalize_label (stream, name);
13726 if (! strcmp (name, "main"))
13727 arm_main_function = 1;
13730 static void
13731 aof_file_start (void)
13733 fputs ("__r0\tRN\t0\n", asm_out_file);
13734 fputs ("__a1\tRN\t0\n", asm_out_file);
13735 fputs ("__a2\tRN\t1\n", asm_out_file);
13736 fputs ("__a3\tRN\t2\n", asm_out_file);
13737 fputs ("__a4\tRN\t3\n", asm_out_file);
13738 fputs ("__v1\tRN\t4\n", asm_out_file);
13739 fputs ("__v2\tRN\t5\n", asm_out_file);
13740 fputs ("__v3\tRN\t6\n", asm_out_file);
13741 fputs ("__v4\tRN\t7\n", asm_out_file);
13742 fputs ("__v5\tRN\t8\n", asm_out_file);
13743 fputs ("__v6\tRN\t9\n", asm_out_file);
13744 fputs ("__sl\tRN\t10\n", asm_out_file);
13745 fputs ("__fp\tRN\t11\n", asm_out_file);
13746 fputs ("__ip\tRN\t12\n", asm_out_file);
13747 fputs ("__sp\tRN\t13\n", asm_out_file);
13748 fputs ("__lr\tRN\t14\n", asm_out_file);
13749 fputs ("__pc\tRN\t15\n", asm_out_file);
13750 fputs ("__f0\tFN\t0\n", asm_out_file);
13751 fputs ("__f1\tFN\t1\n", asm_out_file);
13752 fputs ("__f2\tFN\t2\n", asm_out_file);
13753 fputs ("__f3\tFN\t3\n", asm_out_file);
13754 fputs ("__f4\tFN\t4\n", asm_out_file);
13755 fputs ("__f5\tFN\t5\n", asm_out_file);
13756 fputs ("__f6\tFN\t6\n", asm_out_file);
13757 fputs ("__f7\tFN\t7\n", asm_out_file);
13758 text_section ();
13761 static void
13762 aof_file_end (void)
13764 if (flag_pic)
13765 aof_dump_pic_table (asm_out_file);
13766 aof_dump_imports (asm_out_file);
13767 fputs ("\tEND\n", asm_out_file);
13769 #endif /* AOF_ASSEMBLER */
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
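/* For illustration only (hypothetical output): on ARM (non-Thumb), a
   thunk for a function `f' with `this' in r0 and delta = 0x3004 would
   be emitted by the code above roughly as

	add	r0, r0, #4
	add	r0, r0, #12288	@ 0x3000
	b	f

   because the adjustment loop peels DELTA off in 8-bit chunks aligned
   to even bit positions, each of which is a valid ARM immediate.  */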
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
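/* A worked example (for illustration only): a V4HImode constant with
   elements { 1, 2, 3, 4 } is printed most-significant element first,
   so the loop above emits "0x0004000300020001".  */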
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
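/* For illustration only (register choices are hypothetical): for an
   out-of-range load such as `wldrw wcgr0, [r3, #2048]', the expansion
   above reuses the base register r3 as a temporary and emits roughly

	str	r3, [sp, #-4]!	@ Start of GR load expansion
	ldr	r3, [r3, #2048]
	tmcr	wcgr0, r3
	ldr	r3, [sp], #4	@ End of GR load expansion

   i.e. the value is loaded into a spilled core register and then moved
   into the wCGR register with tmcr.  */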
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which the address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
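/* A worked example (for illustration only), assuming NUM_ARG_REGS is 4
   and UNITS_PER_WORD is 4 as on ARM: for `int f (int a, ...)' one
   argument register is consumed by the named argument (cum->nregs == 1),
   so the hook above sets *pretend_size to (4 - 1) * 4 = 12 bytes,
   making the prologue push r1-r3 for the anonymous arguments.  */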
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
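/* For illustration only (a hypothetical insn pair): if PRODUCER sets r1
   and CONSUMER is `str r1, [r2, #4]', the stored value mentions r1 but
   the destination address (r2 + 4) does not, so the function above
   returns nonzero: the store needs r1 only at the data phase, not to
   form the address.  For `str r3, [r1]' it would return zero.  */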
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}
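/* For illustration only (a hypothetical insn pair): if PRODUCER sets r1
   and CONSUMER is `add r0, r2, r3, lsl r4', the early operand is the
   whole shift (r3 shifted by r4), so the function above returns nonzero
   only when r1 is neither r3 nor r4; the plain operand r2 is needed
   later and does not count as an early dependency.  */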
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
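/* For illustration only (a hypothetical insn pair): if PRODUCER sets r1
   and CONSUMER is a multiply-accumulate `mla r0, r2, r3, r4', the early
   operand is the multiplication (r2 * r3); the function above returns
   nonzero when r1 feeds only the accumulator r4, which is consumed late
   in the pipeline.  A plain multiply (no PLUS) always returns zero.  */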
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}
/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}
/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}
/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}
/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}
/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}
/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}
/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}
/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}
/* The EABI says that the virtual table, etc., for a class must be
   exported if it has a key method.  The EABI does not specify the
   behavior if there is no key method, but there is no harm in
   exporting the class data in that case too.  */

static bool
arm_cxx_export_class_data (void)
{
  return TARGET_AAPCS_BASED;
}
void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}
      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
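/* A worked example of the split above (for illustration only): for
   delta = 5000 the store would otherwise need an out-of-range offset,
   so we first add 5000 & ~4095 = 4096 into SCRATCH and then address
   the slot at SCRATCH + (5000 & 4095) = SCRATCH + 904, keeping the
   remaining offset within the 12-bit range of ARM load/store insns.  */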
void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
	  || delta >= 1024)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
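/* The limits used above mirror Thumb-1 addressing modes (a rough
   illustration, not a normative statement): a low-register base allows
   word offsets of 0-124 (5-bit immediate scaled by 4), while an
   SP-relative access allows 0-1020 (8-bit immediate scaled by 4).  So,
   for example, delta = 1024 with REG == SP_REGNUM must go through
   SCRATCH, via a move of #1024 and an add of the stack pointer.  */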
/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
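/* A worked example (for illustration only): with the SImode mask of
   255, a variable shift whose count has already been reduced modulo
   256 needs no explicit masking insn, because the hardware truncates
   the count itself; a count of 260 therefore behaves exactly like a
   count of 4.  For DImode the mask of 0 promises nothing, so the
   middle end must mask the count explicitly if it needs that
   behavior.  */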