1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
74 static rtx emit_multi_reg_push (int);
75 static rtx emit_sfm (int, int);
76 #ifndef AOF_ASSEMBLER
77 static bool arm_assemble_integer (rtx, unsigned int, int);
78 #endif
79 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
80 static arm_cc get_arm_condition_code (rtx);
81 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
82 static rtx is_jump_table (rtx);
83 static const char *output_multi_immediate (rtx *, const char *, const char *,
84 int, HOST_WIDE_INT);
85 static void print_multi_reg (FILE *, const char *, int, int);
86 static const char *shift_op (rtx, HOST_WIDE_INT *);
87 static struct machine_function *arm_init_machine_status (void);
88 static int number_of_first_bit_set (int);
89 static void replace_symbols_in_block (tree, rtx, rtx);
90 static void thumb_exit (FILE *, int);
91 static void thumb_pushpop (FILE *, int, int, int *, int);
92 static rtx is_jump_table (rtx);
93 static HOST_WIDE_INT get_jump_table_size (rtx);
94 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
95 static Mnode *add_minipool_forward_ref (Mfix *);
96 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
97 static Mnode *add_minipool_backward_ref (Mfix *);
98 static void assign_minipool_offsets (Mfix *);
99 static void arm_print_value (FILE *, rtx);
100 static void dump_minipool (rtx);
101 static int arm_barrier_cost (rtx);
102 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
103 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
104 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
105 rtx);
106 static void arm_reorg (void);
107 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
108 static int current_file_function_operand (rtx);
109 static unsigned long arm_compute_save_reg0_reg12_mask (void);
110 static unsigned long arm_compute_save_reg_mask (void);
111 static unsigned long arm_isr_value (tree);
112 static unsigned long arm_compute_func_type (void);
113 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
114 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
115 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
116 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
118 static int arm_comp_type_attributes (tree, tree);
119 static void arm_set_default_type_attributes (tree);
120 static int arm_adjust_cost (rtx, rtx, rtx, int);
121 static int count_insns_for_constant (HOST_WIDE_INT, int);
122 static int arm_get_strip_length (int);
123 static bool arm_function_ok_for_sibcall (tree, tree);
124 static void arm_internal_label (FILE *, const char *, unsigned long);
125 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
126 tree);
127 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
128 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
129 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
131 static bool arm_9e_rtx_costs (rtx, int, int, int *);
132 static int arm_address_cost (rtx);
133 static bool arm_memory_load_p (rtx);
134 static bool arm_cirrus_insn_p (rtx);
135 static void cirrus_reorg (rtx);
136 static void arm_init_builtins (void);
137 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
138 static void arm_init_iwmmxt_builtins (void);
139 static rtx safe_vector_operand (rtx, enum machine_mode);
140 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
141 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
142 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
143 static void emit_constant_insn (rtx cond, rtx pattern);
145 #ifdef OBJECT_FORMAT_ELF
146 static void arm_elf_asm_named_section (const char *, unsigned int);
147 #endif
148 #ifndef ARM_PE
149 static void arm_encode_section_info (tree, rtx, int);
150 #endif
151 #ifdef AOF_ASSEMBLER
152 static void aof_globalize_label (FILE *, const char *);
153 static void aof_dump_imports (FILE *);
154 static void aof_dump_pic_table (FILE *);
155 static void aof_file_start (void);
156 static void aof_file_end (void);
157 #endif
158 static rtx arm_struct_value_rtx (tree, int);
159 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
160 tree, int *, int);
161 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
162 enum machine_mode, tree, bool);
163 static bool arm_promote_prototypes (tree);
164 static bool arm_default_short_enums (void);
165 static bool arm_align_anon_bitfield (void);
167 static tree arm_cxx_guard_type (void);
168 static bool arm_cxx_guard_mask_bit (void);
169 static tree arm_get_cookie_size (tree);
170 static bool arm_cookie_has_size (void);
171 static bool arm_cxx_cdtor_returns_this (void);
172 static void arm_init_libfuncs (void);
175 /* Initialize the GCC target structure. */
176 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
177 #undef TARGET_MERGE_DECL_ATTRIBUTES
178 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
179 #endif
181 #undef TARGET_ATTRIBUTE_TABLE
182 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
184 #ifdef AOF_ASSEMBLER
185 #undef TARGET_ASM_BYTE_OP
186 #define TARGET_ASM_BYTE_OP "\tDCB\t"
187 #undef TARGET_ASM_ALIGNED_HI_OP
188 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
189 #undef TARGET_ASM_ALIGNED_SI_OP
190 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
191 #undef TARGET_ASM_GLOBALIZE_LABEL
192 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
193 #undef TARGET_ASM_FILE_START
194 #define TARGET_ASM_FILE_START aof_file_start
195 #undef TARGET_ASM_FILE_END
196 #define TARGET_ASM_FILE_END aof_file_end
197 #else
198 #undef TARGET_ASM_ALIGNED_SI_OP
199 #define TARGET_ASM_ALIGNED_SI_OP NULL
200 #undef TARGET_ASM_INTEGER
201 #define TARGET_ASM_INTEGER arm_assemble_integer
202 #endif
204 #undef TARGET_ASM_FUNCTION_PROLOGUE
205 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
207 #undef TARGET_ASM_FUNCTION_EPILOGUE
208 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
210 #undef TARGET_COMP_TYPE_ATTRIBUTES
211 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
213 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
214 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
216 #undef TARGET_SCHED_ADJUST_COST
217 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
219 #undef TARGET_ENCODE_SECTION_INFO
220 #ifdef ARM_PE
221 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
222 #else
223 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
224 #endif
226 #undef TARGET_STRIP_NAME_ENCODING
227 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
229 #undef TARGET_ASM_INTERNAL_LABEL
230 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
232 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
233 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
235 #undef TARGET_ASM_OUTPUT_MI_THUNK
236 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
237 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
238 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
240 /* This will be overridden in arm_override_options. */
241 #undef TARGET_RTX_COSTS
242 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
243 #undef TARGET_ADDRESS_COST
244 #define TARGET_ADDRESS_COST arm_address_cost
246 #undef TARGET_MACHINE_DEPENDENT_REORG
247 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
249 #undef TARGET_INIT_BUILTINS
250 #define TARGET_INIT_BUILTINS arm_init_builtins
251 #undef TARGET_EXPAND_BUILTIN
252 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
254 #undef TARGET_INIT_LIBFUNCS
255 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
257 #undef TARGET_PROMOTE_FUNCTION_ARGS
258 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
259 #undef TARGET_PROMOTE_FUNCTION_RETURN
260 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
261 #undef TARGET_PROMOTE_PROTOTYPES
262 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
263 #undef TARGET_PASS_BY_REFERENCE
264 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
266 #undef TARGET_STRUCT_VALUE_RTX
267 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
269 #undef TARGET_SETUP_INCOMING_VARARGS
270 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
272 #undef TARGET_DEFAULT_SHORT_ENUMS
273 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
275 #undef TARGET_ALIGN_ANON_BITFIELD
276 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
278 #undef TARGET_CXX_GUARD_TYPE
279 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
281 #undef TARGET_CXX_GUARD_MASK_BIT
282 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
284 #undef TARGET_CXX_GET_COOKIE_SIZE
285 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
287 #undef TARGET_CXX_COOKIE_HAS_SIZE
288 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
290 #undef TARGET_CXX_CDTOR_RETURNS_THIS
291 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
293 struct gcc_target targetm = TARGET_INITIALIZER;
295 /* Obstack for minipool constant handling. */
296 static struct obstack minipool_obstack;
297 static char * minipool_startobj;
299 /* The maximum number of insns skipped which
300 will be conditionalised if possible. */
301 static int max_insns_skipped = 5;
303 extern FILE * asm_out_file;
305 /* True if we are currently building a constant table. */
306 int making_const_table;
308 /* Define the information needed to generate branch insns. This is
309 stored from the compare operation. */
310 rtx arm_compare_op0, arm_compare_op1;
312 /* The processor for which instructions should be scheduled. */
313 enum processor_type arm_tune = arm_none;
315 /* Which floating point model to use. */
316 enum arm_fp_model arm_fp_model;
318 /* Which floating point hardware is available. */
319 enum fputype arm_fpu_arch;
321 /* Which floating point hardware to schedule for. */
322 enum fputype arm_fpu_tune;
324 /* Whether to use floating point hardware. */
325 enum float_abi_type arm_float_abi;
327 /* Which ABI to use. */
328 enum arm_abi_type arm_abi;
330 /* Set by the -mfpu=... option. */
331 const char * target_fpu_name = NULL;
333 /* Set by the -mfpe=... option. */
334 const char * target_fpe_name = NULL;
336 /* Set by the -mfloat-abi=... option. */
337 const char * target_float_abi_name = NULL;
339 /* Set by the -mabi=... option. */
340 const char * target_abi_name = NULL;
342 /* Used to parse -mstructure_size_boundary command line option. */
343 const char * structure_size_string = NULL;
344 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
346 /* Bit values used to identify processor capabilities. */
347 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
348 #define FL_ARCH3M (1 << 1) /* Extended multiply */
349 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
350 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
351 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
352 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
353 #define FL_THUMB (1 << 6) /* Thumb aware */
354 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
355 #define FL_STRONG (1 << 8) /* StrongARM */
356 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
357 #define FL_XSCALE (1 << 10) /* XScale */
358 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
359 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
360 media instructions. */
361 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
363 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
365 #define FL_FOR_ARCH2 0
366 #define FL_FOR_ARCH3 FL_MODE32
367 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
368 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
369 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
370 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
371 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
372 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
373 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
374 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
375 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
376 #define FL_FOR_ARCH6J FL_FOR_ARCH6
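/* So, for example, FL_FOR_ARCH5TE expands to
   (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB):
   each architecture mask inherits all the capabilities of its
   predecessors.  */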
378 /* The bits in this mask specify which
379 instructions we are allowed to generate. */
380 static unsigned long insn_flags = 0;
382 /* The bits in this mask specify which instruction scheduling options should
383 be used. */
384 static unsigned long tune_flags = 0;
386 /* The following are used in the arm.md file as equivalents to bits
387 in the above two flag variables. */
389 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
390 int arm_arch3m = 0;
392 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
393 int arm_arch4 = 0;
395 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
396 int arm_arch4t = 0;
398 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
399 int arm_arch5 = 0;
401 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
402 int arm_arch5e = 0;
404 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
405 int arm_arch6 = 0;
407 /* Nonzero if this chip can benefit from load scheduling. */
408 int arm_ld_sched = 0;
410 /* Nonzero if this chip is a StrongARM. */
411 int arm_is_strong = 0;
413 /* Nonzero if this chip is a Cirrus variant. */
414 int arm_arch_cirrus = 0;
416 /* Nonzero if this chip supports Intel Wireless MMX technology. */
417 int arm_arch_iwmmxt = 0;
419 /* Nonzero if this chip is an XScale. */
420 int arm_arch_xscale = 0;
422 /* Nonzero if tuning for XScale */
423 int arm_tune_xscale = 0;
425 /* Nonzero if this chip is an ARM6 or an ARM7. */
426 int arm_is_6_or_7 = 0;
428 /* Nonzero if generating Thumb instructions. */
429 int thumb_code = 0;
431 /* Nonzero if we should define __THUMB_INTERWORK__ in the
432 preprocessor.
433 XXX This is a bit of a hack, it's intended to help work around
434 problems in GLD which doesn't understand that armv5t code is
435 interworking clean. */
436 int arm_cpp_interwork = 0;
438 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
439 must report the mode of the memory reference from PRINT_OPERAND to
440 PRINT_OPERAND_ADDRESS. */
441 enum machine_mode output_memory_reference_mode;
443 /* The register number to be used for the PIC offset register. */
444 const char * arm_pic_register_string = NULL;
445 int arm_pic_register = INVALID_REGNUM;
447 /* Set to 1 when a return insn is output; this means that the epilogue
448 is not needed. */
449 int return_used_this_function;
451 /* Set to 1 after arm_reorg has started. Reset at the start of
452 the next function. */
453 static int after_arm_reorg = 0;
455 /* The maximum number of insns to be used when loading a constant. */
456 static int arm_constant_limit = 3;
458 /* For an explanation of these variables, see final_prescan_insn below. */
459 int arm_ccfsm_state;
460 enum arm_cond_code arm_current_cc;
461 rtx arm_target_insn;
462 int arm_target_label;
464 /* The condition codes of the ARM, and the inverse function. */
465 static const char * const arm_condition_codes[] =
467 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
468 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
471 #define streq(string1, string2) (strcmp (string1, string2) == 0)
473 /* Initialization code. */
475 struct processors
477 const char *const name;
478 enum processor_type core;
479 const char *arch;
480 const unsigned long flags;
481 bool (* rtx_costs) (rtx, int, int, int *);
484 /* Not all of these give usefully different compilation alternatives,
485 but there is no simple way of generalizing them. */
486 static const struct processors all_cores[] =
488 /* ARM Cores */
489 #define ARM_CORE(NAME, ARCH, FLAGS, COSTS) \
490 {#NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
491 #include "arm-cores.def"
492 #undef ARM_CORE
493 {NULL, arm_none, NULL, 0, NULL}
496 static const struct processors all_architectures[] =
498 /* ARM Architectures */
499 /* We don't specify rtx_costs here as it will be figured out
500 from the core. */
502 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
503 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
504 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
505 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
506 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
507 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
508 implementations that support it, so we will leave it out for now. */
509 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
510 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
511 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
512 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
513 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
514 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
515 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
516 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
517 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
518 {NULL, arm_none, NULL, 0 , NULL}
521 /* This is a magic structure. The 'string' field is magically filled in
522 with a pointer to the value specified by the user on the command line
523 assuming that the user has specified such a value. */
525 struct arm_cpu_select arm_select[] =
527 /* string name processors */
528 { NULL, "-mcpu=", all_cores },
529 { NULL, "-march=", all_architectures },
530 { NULL, "-mtune=", all_cores }
534 /* The name of the preprocessor macro to define for this architecture. */
536 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
538 struct fpu_desc
540 const char * name;
541 enum fputype fpu;
545 /* Available values for -mfpu=. */
547 static const struct fpu_desc all_fpus[] =
549 {"fpa", FPUTYPE_FPA},
550 {"fpe2", FPUTYPE_FPA_EMU2},
551 {"fpe3", FPUTYPE_FPA_EMU2},
552 {"maverick", FPUTYPE_MAVERICK},
553 {"vfp", FPUTYPE_VFP}
557 /* Floating point models used by the different hardware.
558 See fputype in arm.h. */
560 static const enum fputype fp_model_for_fpu[] =
562 /* No FP hardware. */
563 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
564 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
565 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
566 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
567 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
568 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
572 struct float_abi
574 const char * name;
575 enum float_abi_type abi_type;
579 /* Available values for -mfloat-abi=. */
581 static const struct float_abi all_float_abis[] =
583 {"soft", ARM_FLOAT_ABI_SOFT},
584 {"softfp", ARM_FLOAT_ABI_SOFTFP},
585 {"hard", ARM_FLOAT_ABI_HARD}
589 struct abi_name
591 const char *name;
592 enum arm_abi_type abi_type;
596 /* Available values for -mabi=. */
598 static const struct abi_name arm_all_abis[] =
600 {"apcs-gnu", ARM_ABI_APCS},
601 {"atpcs", ARM_ABI_ATPCS},
602 {"aapcs", ARM_ABI_AAPCS},
603 {"iwmmxt", ARM_ABI_IWMMXT}
606 /* Return the number of bits set in VALUE. */
607 static unsigned
608 bit_count (unsigned long value)
610 unsigned long count = 0;
612 while (value)
614 count++;
615 value &= value - 1; /* Clear the least-significant set bit. */
618 return count;
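/* For example, bit_count (0x29) takes three iterations:
   101001 -> 101000 -> 100000 -> 0; "value &= value - 1" clears
   exactly one set bit per pass, so the loop runs once per set bit.  */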
621 /* Set up library functions unique to ARM. */
623 static void
624 arm_init_libfuncs (void)
626 /* There are no special library functions unless we are using the
627 ARM BPABI. */
628 if (!TARGET_BPABI)
629 return;
631 /* The functions below are described in Section 4 of the "Run-Time
632 ABI for the ARM architecture", Version 1.0. */
634 /* Double-precision floating-point arithmetic. Table 2. */
635 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
636 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
637 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
638 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
639 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
641 /* Double-precision comparisons. Table 3. */
642 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
643 set_optab_libfunc (ne_optab, DFmode, NULL);
644 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
645 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
646 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
647 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
648 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
650 /* Single-precision floating-point arithmetic. Table 4. */
651 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
652 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
653 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
654 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
655 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
657 /* Single-precision comparisons. Table 5. */
658 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
659 set_optab_libfunc (ne_optab, SFmode, NULL);
660 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
661 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
662 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
663 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
664 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
666 /* Floating-point to integer conversions. Table 6. */
667 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
668 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
669 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
670 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
671 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
672 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
673 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
674 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
676 /* Conversions between floating types. Table 7. */
677 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
678 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
680 /* Integer to floating-point conversions. Table 8. */
681 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
682 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
683 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
684 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
685 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
686 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
687 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
688 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
690 /* Long long. Table 9. */
691 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
692 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
693 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
694 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
695 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
696 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
697 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
698 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
700 /* Integer (32/32->32) division. \S 4.3.1. */
701 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
702 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
704 /* The divmod functions are designed so that they can be used for
705 plain division, even though they return both the quotient and the
706 remainder. The quotient is returned in the usual location (i.e.,
707 r0 for SImode, {r0, r1} for DImode), just as would be expected
708 for an ordinary division routine. Because the AAPCS calling
709 conventions specify that all of { r0, r1, r2, r3 } are
710 call-clobbered registers, there is no need to tell the compiler
711 explicitly that those registers are clobbered by these
712 routines. */
713 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
714 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
715 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
716 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
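/* To illustrate the registrations above (example code only, the
   function name is hypothetical): when TARGET_BPABI holds,

     double example_ddiv (double x, double y) { return x / y; }

   is lowered through sdiv_optab to a call to __aeabi_ddiv rather
   than to the default libgcc routine __divdf3.  */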
719 /* Fix up any incompatible options that the user has specified.
720 This has now turned into a maze. */
721 void
722 arm_override_options (void)
724 unsigned i;
726 /* Set up the flags based on the cpu/architecture selected by the user. */
727 for (i = ARRAY_SIZE (arm_select); i--;)
729 struct arm_cpu_select * ptr = arm_select + i;
731 if (ptr->string != NULL && ptr->string[0] != '\0')
733 const struct processors * sel;
735 for (sel = ptr->processors; sel->name != NULL; sel++)
736 if (streq (ptr->string, sel->name))
738 /* Set the architecture define. */
739 if (i != 2)
740 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
742 /* Determine the processor core for which we should
743 tune code-generation. */
744 if (/* -mcpu= is a sensible default. */
745 i == 0
746 /* If -march= is used, and -mcpu= has not been used,
747 assume that we should tune for a representative
748 CPU from that architecture. */
749 || i == 1
750 /* -mtune= overrides -mcpu= and -march=. */
751 || i == 2)
752 arm_tune = (enum processor_type) (sel - ptr->processors);
754 if (i != 2)
756 /* If we have been given an architecture and a processor
757 make sure that they are compatible. We only generate
758 a warning though, and we prefer the CPU over the
759 architecture. */
760 if (insn_flags != 0 && (insn_flags ^ sel->flags))
761 warning ("switch -mcpu=%s conflicts with -march= switch",
762 ptr->string);
764 insn_flags = sel->flags;
767 break;
770 if (sel->name == NULL)
771 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
775 /* If the user did not specify a processor, choose one for them. */
776 if (insn_flags == 0)
778 const struct processors * sel;
779 unsigned int sought;
780 enum processor_type cpu;
782 cpu = TARGET_CPU_DEFAULT;
783 if (cpu == arm_none)
785 #ifdef SUBTARGET_CPU_DEFAULT
786 /* Use the subtarget default CPU if none was specified by
787 configure. */
788 cpu = SUBTARGET_CPU_DEFAULT;
789 #endif
790 /* Default to ARM6. */
791 if (cpu == arm_none)
792 cpu = arm6;
794 sel = &all_cores[cpu];
796 insn_flags = sel->flags;
798 /* Now check to see if the user has specified some command line
799 switches that require certain abilities from the cpu. */
800 sought = 0;
802 if (TARGET_INTERWORK || TARGET_THUMB)
804 sought |= (FL_THUMB | FL_MODE32);
806 /* There are no ARM processors that support both APCS-26 and
807 interworking. Therefore we force FL_MODE26 to be removed
808 from insn_flags here (if it was set), so that the search
809 below will always be able to find a compatible processor. */
810 insn_flags &= ~FL_MODE26;
813 if (sought != 0 && ((sought & insn_flags) != sought))
815 /* Try to locate a CPU type that supports all of the abilities
816 of the default CPU, plus the extra abilities requested by
817 the user. */
818 for (sel = all_cores; sel->name != NULL; sel++)
819 if ((sel->flags & sought) == (sought | insn_flags))
820 break;
822 if (sel->name == NULL)
824 unsigned current_bit_count = 0;
825 const struct processors * best_fit = NULL;
827 /* Ideally we would like to issue an error message here
828 saying that it was not possible to find a CPU compatible
829 with the default CPU, but which also supports the command
830 line options specified by the programmer, and so they
831 ought to use the -mcpu=<name> command line option to
832 override the default CPU type.
834 If we cannot find a cpu that has both the
835 characteristics of the default cpu and the given
836 command line options we scan the array again looking
837 for a best match. */
838 for (sel = all_cores; sel->name != NULL; sel++)
839 if ((sel->flags & sought) == sought)
841 unsigned count;
843 count = bit_count (sel->flags & insn_flags);
845 if (count >= current_bit_count)
847 best_fit = sel;
848 current_bit_count = count;
852 if (best_fit == NULL)
853 abort ();
854 else
855 sel = best_fit;
858 insn_flags = sel->flags;
860 sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
861 if (arm_tune == arm_none)
862 arm_tune = (enum processor_type) (sel - all_cores);
865 /* The processor for which we should tune should now have been
866 chosen. */
867 if (arm_tune == arm_none)
868 abort ();
870 tune_flags = all_cores[(int)arm_tune].flags;
871 targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
873 /* Make sure that the processor choice does not conflict with any of the
874 other command line choices. */
875 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
877 warning ("target CPU does not support interworking" );
878 target_flags &= ~ARM_FLAG_INTERWORK;
881 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
883 warning ("target CPU does not support THUMB instructions");
884 target_flags &= ~ARM_FLAG_THUMB;
887 if (TARGET_APCS_FRAME && TARGET_THUMB)
889 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
890 target_flags &= ~ARM_FLAG_APCS_FRAME;
893 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
894 from here where no function is being compiled currently. */
895 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
896 && TARGET_ARM)
897 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
899 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
900 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
902 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
903 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
905 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
907 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
908 target_flags |= ARM_FLAG_APCS_FRAME;
911 if (TARGET_POKE_FUNCTION_NAME)
912 target_flags |= ARM_FLAG_APCS_FRAME;
914 if (TARGET_APCS_REENT && flag_pic)
915 error ("-fpic and -mapcs-reent are incompatible");
917 if (TARGET_APCS_REENT)
918 warning ("APCS reentrant code not supported. Ignored");
920 /* If this target is normally configured to use APCS frames, warn if they
921 are turned off and debugging is turned on. */
922 if (TARGET_ARM
923 && write_symbols != NO_DEBUG
924 && !TARGET_APCS_FRAME
925 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
926 warning ("-g with -mno-apcs-frame may not give sensible debugging");
928 /* If stack checking is disabled, we can use r10 as the PIC register,
929 which keeps r9 available. */
930 if (flag_pic)
931 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
933 if (TARGET_APCS_FLOAT)
934 warning ("passing floating point arguments in fp regs not yet supported");
936 /* Initialize boolean versions of the flags, for use in the arm.md file. */
937 arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
938 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
939 arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
940 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
941 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
942 arm_arch6 = (insn_flags & FL_ARCH6) != 0;
943 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
944 arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;
946 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
947 arm_is_strong = (tune_flags & FL_STRONG) != 0;
948 thumb_code = (TARGET_ARM == 0);
949 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
950 && !(tune_flags & FL_ARCH4))) != 0;
951 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
952 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
954 /* V5 code we generate is completely interworking capable, so we turn off
955 TARGET_INTERWORK here to avoid many tests later on. */
957 /* XXX However, we must pass the right pre-processor defines to CPP
958 or GLD can get confused. This is a hack. */
959 if (TARGET_INTERWORK)
960 arm_cpp_interwork = 1;
962 if (arm_arch5)
963 target_flags &= ~ARM_FLAG_INTERWORK;
965 if (target_abi_name)
967 for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
969 if (streq (arm_all_abis[i].name, target_abi_name))
971 arm_abi = arm_all_abis[i].abi_type;
972 break;
975 if (i == ARRAY_SIZE (arm_all_abis))
976 error ("invalid ABI option: -mabi=%s", target_abi_name);
978 else
979 arm_abi = ARM_DEFAULT_ABI;
981 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
982 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
984 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
985 error ("iwmmxt abi requires an iwmmxt capable cpu");
987 arm_fp_model = ARM_FP_MODEL_UNKNOWN;
988 if (target_fpu_name == NULL && target_fpe_name != NULL)
990 if (streq (target_fpe_name, "2"))
991 target_fpu_name = "fpe2";
992 else if (streq (target_fpe_name, "3"))
993 target_fpu_name = "fpe3";
994 else
995 error ("invalid floating point emulation option: -mfpe=%s",
996 target_fpe_name);
998 if (target_fpu_name != NULL)
1000 /* The user specified an FPU. */
1001 for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
1003 if (streq (all_fpus[i].name, target_fpu_name))
1005 arm_fpu_arch = all_fpus[i].fpu;
1006 arm_fpu_tune = arm_fpu_arch;
1007 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1008 break;
1011 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1012 error ("invalid floating point option: -mfpu=%s", target_fpu_name);
1014 else
1016 #ifdef FPUTYPE_DEFAULT
1017 /* Use the default if it is specified for this platform. */
1018 arm_fpu_arch = FPUTYPE_DEFAULT;
1019 arm_fpu_tune = FPUTYPE_DEFAULT;
1020 #else
1021 /* Pick one based on CPU type. */
1022 /* ??? Some targets assume FPA is the default.
1023 if ((insn_flags & FL_VFP) != 0)
1024 arm_fpu_arch = FPUTYPE_VFP;
1025 else
1026 */
1027 if (arm_arch_cirrus)
1028 arm_fpu_arch = FPUTYPE_MAVERICK;
1029 else
1030 arm_fpu_arch = FPUTYPE_FPA_EMU2;
1031 #endif
1032 if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
1033 arm_fpu_tune = FPUTYPE_FPA;
1034 else
1035 arm_fpu_tune = arm_fpu_arch;
1036 arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
1037 if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
1038 abort ();
1041 if (target_float_abi_name != NULL)
1043 /* The user specified an FP ABI. */
1044 for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
1046 if (streq (all_float_abis[i].name, target_float_abi_name))
1048 arm_float_abi = all_float_abis[i].abi_type;
1049 break;
1052 if (i == ARRAY_SIZE (all_float_abis))
1053 error ("invalid floating point abi: -mfloat-abi=%s",
1054 target_float_abi_name);
1056 else
1058 /* Use soft-float target flag. */
1059 if (target_flags & ARM_FLAG_SOFT_FLOAT)
1060 arm_float_abi = ARM_FLOAT_ABI_SOFT;
1061 else
1062 arm_float_abi = ARM_FLOAT_ABI_HARD;
1065 if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
1066 sorry ("-mfloat-abi=softfp");
1067 /* If soft-float is specified then don't use FPU. */
1068 if (TARGET_SOFT_FLOAT)
1069 arm_fpu_arch = FPUTYPE_NONE;
1071 /* For arm2/3 there is no need to do any scheduling if there is only
1072 a floating point emulator, or we are doing software floating-point. */
1073 if ((TARGET_SOFT_FLOAT
1074 || arm_fpu_tune == FPUTYPE_FPA_EMU2
1075 || arm_fpu_tune == FPUTYPE_FPA_EMU3)
1076 && (tune_flags & FL_MODE32) == 0)
1077 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
1079 /* Override the default structure alignment for AAPCS ABI. */
1080 if (arm_abi == ARM_ABI_AAPCS)
1081 arm_structure_size_boundary = 8;
1083 if (structure_size_string != NULL)
1085 int size = strtol (structure_size_string, NULL, 0);
1087 if (size == 8 || size == 32
1088 || (ARM_DOUBLEWORD_ALIGN && size == 64))
1089 arm_structure_size_boundary = size;
1090 else
1091 warning ("structure size boundary can only be set to %s",
1092 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
1095 if (arm_pic_register_string != NULL)
1097 int pic_register = decode_reg_name (arm_pic_register_string);
1099 if (!flag_pic)
1100 warning ("-mpic-register= is useless without -fpic");
1102 /* Prevent the user from choosing an obviously stupid PIC register. */
1103 else if (pic_register < 0 || call_used_regs[pic_register]
1104 || pic_register == HARD_FRAME_POINTER_REGNUM
1105 || pic_register == STACK_POINTER_REGNUM
1106 || pic_register >= PC_REGNUM)
1107 error ("unable to use '%s' for PIC register", arm_pic_register_string);
1108 else
1109 arm_pic_register = pic_register;
1112 if (TARGET_THUMB && flag_schedule_insns)
1114 /* Don't warn since it's on by default in -O2. */
1115 flag_schedule_insns = 0;
1118 if (optimize_size)
1120 /* There's some dispute as to whether this should be 1 or 2. However,
1121 experiments seem to show that in pathological cases a setting of
1122 1 degrades less severely than a setting of 2. This could change if
1123 other parts of the compiler change their behavior. */
1124 arm_constant_limit = 1;
1126 /* If optimizing for size, bump the number of instructions that we
1127 are prepared to conditionally execute (even on a StrongARM). */
1128 max_insns_skipped = 6;
1130 else
1132 /* For processors with load scheduling, it never costs more than
1133 2 cycles to load a constant, and the load scheduler may well
1134 reduce that to 1. */
1135 if (tune_flags & FL_LDSCHED)
1136 arm_constant_limit = 1;
1138 /* On XScale the longer latency of a load makes it more difficult
1139 to achieve a good schedule, so it's faster to synthesize
1140 constants that can be done in two insns. */
1141 if (arm_tune_xscale)
1142 arm_constant_limit = 2;
1144 /* StrongARM has early execution of branches, so a sequence
1145 that is worth skipping is shorter. */
1146 if (arm_is_strong)
1147 max_insns_skipped = 3;
1150 /* Register global variables with the garbage collector. */
1151 arm_add_gc_roots ();
1154 static void
1155 arm_add_gc_roots (void)
1157 gcc_obstack_init(&minipool_obstack);
1158 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
1161 /* A table of known ARM exception types.
1162 For use with the interrupt function attribute. */
1164 typedef struct
1166 const char *const arg;
1167 const unsigned long return_value;
1169 isr_attribute_arg;
1171 static const isr_attribute_arg isr_attribute_args [] =
1173 { "IRQ", ARM_FT_ISR },
1174 { "irq", ARM_FT_ISR },
1175 { "FIQ", ARM_FT_FIQ },
1176 { "fiq", ARM_FT_FIQ },
1177 { "ABORT", ARM_FT_ISR },
1178 { "abort", ARM_FT_ISR },
1179 { "ABORT", ARM_FT_ISR },
1180 { "abort", ARM_FT_ISR },
1181 { "UNDEF", ARM_FT_EXCEPTION },
1182 { "undef", ARM_FT_EXCEPTION },
1183 { "SWI", ARM_FT_EXCEPTION },
1184 { "swi", ARM_FT_EXCEPTION },
1185 { NULL, ARM_FT_NORMAL }
1188 /* Returns the (interrupt) function type of the current
1189 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1191 static unsigned long
1192 arm_isr_value (tree argument)
1194 const isr_attribute_arg * ptr;
1195 const char * arg;
1197 /* No argument - default to IRQ. */
1198 if (argument == NULL_TREE)
1199 return ARM_FT_ISR;
1201 /* Get the value of the argument. */
1202 if (TREE_VALUE (argument) == NULL_TREE
1203 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
1204 return ARM_FT_UNKNOWN;
1206 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
1208 /* Check it against the list of known arguments. */
1209 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
1210 if (streq (arg, ptr->arg))
1211 return ptr->return_value;
1213 /* An unrecognized interrupt type. */
1214 return ARM_FT_UNKNOWN;
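/* These tables service the source-level interrupt attribute, e.g.
   (illustrative declaration):

     void example_handler (void) __attribute__ ((interrupt ("IRQ")));

   for which arm_isr_value returns ARM_FT_ISR.  */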
1217 /* Computes the type of the current function. */
1219 static unsigned long
1220 arm_compute_func_type (void)
1222 unsigned long type = ARM_FT_UNKNOWN;
1223 tree a;
1224 tree attr;
1226 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1227 abort ();
1229 /* Decide if the current function is volatile. Such functions
1230 never return, and many memory cycles can be saved by not storing
1231 register values that will never be needed again. This optimization
1232 was added to speed up context switching in a kernel application. */
1233 if (optimize > 0
1234 && TREE_NOTHROW (current_function_decl)
1235 && TREE_THIS_VOLATILE (current_function_decl))
1236 type |= ARM_FT_VOLATILE;
1238 if (cfun->static_chain_decl != NULL)
1239 type |= ARM_FT_NESTED;
1241 attr = DECL_ATTRIBUTES (current_function_decl);
1243 a = lookup_attribute ("naked", attr);
1244 if (a != NULL_TREE)
1245 type |= ARM_FT_NAKED;
1247 a = lookup_attribute ("isr", attr);
1248 if (a == NULL_TREE)
1249 a = lookup_attribute ("interrupt", attr);
1251 if (a == NULL_TREE)
1252 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1253 else
1254 type |= arm_isr_value (TREE_VALUE (a));
1256 return type;
1259 /* Returns the type of the current function. */
1261 unsigned long
1262 arm_current_func_type (void)
1264 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1265 cfun->machine->func_type = arm_compute_func_type ();
1267 return cfun->machine->func_type;
1270 /* Return 1 if it is possible to return using a single instruction.
1271 If SIBLING is non-null, this is a test for a return before a sibling
1272 call. SIBLING is the call insn, so we can examine its register usage. */
1274 int
1275 use_return_insn (int iscond, rtx sibling)
1277 int regno;
1278 unsigned int func_type;
1279 unsigned long saved_int_regs;
1280 unsigned HOST_WIDE_INT stack_adjust;
1281 arm_stack_offsets *offsets;
1283 /* Never use a return instruction before reload has run. */
1284 if (!reload_completed)
1285 return 0;
1287 func_type = arm_current_func_type ();
1289 /* Naked functions and volatile functions need special
1290 consideration. */
1291 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1292 return 0;
1294 /* So do interrupt functions that use the frame pointer. */
1295 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1296 return 0;
1298 offsets = arm_get_frame_offsets ();
1299 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
1301 /* As do variadic functions. */
1302 if (current_function_pretend_args_size
1303 || cfun->machine->uses_anonymous_args
1304 /* Or if the function calls __builtin_eh_return () */
1305 || current_function_calls_eh_return
1306 /* Or if the function calls alloca */
1307 || current_function_calls_alloca
1308 /* Or if there is a stack adjustment. However, if the stack pointer
1309 is saved on the stack, we can use a pre-incrementing stack load. */
1310 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1311 return 0;
1313 saved_int_regs = arm_compute_save_reg_mask ();
1315 /* Unfortunately, the insn
1317 ldmib sp, {..., sp, ...}
1319 triggers a bug on most SA-110 based devices, such that the stack
1320 pointer won't be correctly restored if the instruction takes a
1321 page fault. We work around this problem by popping r3 along with
1322 the other registers, since that is never slower than executing
1323 another instruction.
1325 We test for !arm_arch5 here, because code for any architecture
1326 less than this could potentially be run on one of the buggy
1327 chips. */
1328 if (stack_adjust == 4 && !arm_arch5)
1330 /* Validate that r3 is a call-clobbered register (always true in
1331 the default abi) ... */
1332 if (!call_used_regs[3])
1333 return 0;
1335 /* ... that it isn't being used for a return value (always true
1336 until we implement return-in-regs), or for a tail-call
1337 argument ... */
1338 if (sibling)
1340 if (GET_CODE (sibling) != CALL_INSN)
1341 abort ();
1343 if (find_regno_fusage (sibling, USE, 3))
1344 return 0;
1347 /* ... and that there are no call-saved registers in r0-r2
1348 (always true in the default ABI). */
1349 if (saved_int_regs & 0x7)
1350 return 0;
1353 /* Can't be done if interworking with Thumb, and any registers have been
1354 stacked. */
1355 if (TARGET_INTERWORK && saved_int_regs != 0)
1356 return 0;
1358 /* On StrongARM, conditional returns are expensive if they aren't
1359 taken and multiple registers have been stacked. */
1360 if (iscond && arm_is_strong)
1362 /* Conditional return when just the LR is stored is a simple
1363 conditional-load instruction, that's not expensive. */
1364 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1365 return 0;
1367 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1368 return 0;
1371 /* If there are saved registers but the LR isn't saved, then we need
1372 two instructions for the return. */
1373 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1374 return 0;
1376 /* Can't be done if any of the FPA regs are pushed,
1377 since this also requires an insn. */
1378 if (TARGET_HARD_FLOAT && TARGET_FPA)
1379 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
1380 if (regs_ever_live[regno] && !call_used_regs[regno])
1381 return 0;
1383 /* Likewise VFP regs. */
1384 if (TARGET_HARD_FLOAT && TARGET_VFP)
1385 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
1386 if (regs_ever_live[regno] && !call_used_regs[regno])
1387 return 0;
1389 if (TARGET_REALLY_IWMMXT)
1390 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1391 if (regs_ever_live[regno] && ! call_used_regs [regno])
1392 return 0;
1394 return 1;
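/* When this returns 1, the entire epilogue can be a single
   instruction: for example "mov pc, lr" when no registers were
   saved, or a single load-multiple such as "ldmfd sp!, {r4, r5, pc}"
   when the saved registers include the link register.  (Illustrative
   register choices.)  */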
1397 /* Return TRUE if int I is a valid immediate ARM constant. */
1399 int
1400 const_ok_for_arm (HOST_WIDE_INT i)
1402 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1404 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1405 be all zero, or all one. */
1406 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1407 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1408 != ((~(unsigned HOST_WIDE_INT) 0)
1409 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1410 return FALSE;
1412 /* Fast return for 0 and powers of 2 */
1413 if ((i & (i - 1)) == 0)
1414 return TRUE;
1416 do
1417 {
1418 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1419 return TRUE;
1420 mask =
1421 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1422 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1423 }
1424 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1426 return FALSE;
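/* A stand-alone restatement of the test above, for illustration only
   (hypothetical helper, assuming 32-bit values): an ARM
   data-processing immediate is an 8-bit value rotated right by an
   even amount, so a constant is valid iff some even left-rotation of
   it fits in 8 bits.  */
static int
demo_const_ok_for_arm (unsigned long i)
{
  unsigned long x = i & 0xffffffffUL;
  int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate X left by ROT bits within a 32-bit word.  */
      unsigned long r = ((x << rot) | (rot ? x >> (32 - rot) : 0))
                        & 0xffffffffUL;

      if ((r & ~(unsigned long) 0xff) == 0)
        return 1;
    }
  return 0;
}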
1429 /* Return true if I is a valid constant for the operation CODE. */
1430 static int
1431 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1433 if (const_ok_for_arm (i))
1434 return 1;
1436 switch (code)
1438 case PLUS:
1439 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1441 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1442 case XOR:
1443 case IOR:
1444 return 0;
1446 case AND:
1447 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1449 default:
1450 abort ();
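/* For example, PLUS with 0xffffff00 is accepted because the negated
   constant 0x100 is a valid immediate (the add becomes a sub), and
   AND with 0xffffff00 is accepted because the inverted constant 0xff
   is (the and becomes a bic).  */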
1454 /* Emit a sequence of insns to handle a large constant.
1455 CODE is the code of the operation required, it can be any of SET, PLUS,
1456 IOR, AND, XOR, MINUS;
1457 MODE is the mode in which the operation is being performed;
1458 VAL is the integer to operate on;
1459 SOURCE is the other operand (a register, or a null-pointer for SET);
1460 SUBTARGETS means it is safe to create scratch registers if that will
1461 either produce a simpler sequence, or we will want to cse the values.
1462 Return value is the number of insns emitted. */
1464 int
1465 arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
1466 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1468 rtx cond;
1470 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
1471 cond = COND_EXEC_TEST (PATTERN (insn));
1472 else
1473 cond = NULL_RTX;
1475 if (subtargets || code == SET
1476 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1477 && REGNO (target) != REGNO (source)))
1479 /* After arm_reorg has been called, we can't fix up expensive
1480 constants by pushing them into memory so we must synthesize
1481 them in-line, regardless of the cost. This is only likely to
1482 be more costly on chips that have load delay slots and we are
1483 compiling without running the scheduler (so no splitting
1484 occurred before the final instruction emission).
1486 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1487 */
1488 if (!after_arm_reorg
1489 && !cond
1490 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
1491 1, 0)
1492 > arm_constant_limit + (code != SET)))
1494 if (code == SET)
1496 /* Currently SET is the only monadic value for CODE, all
1497 the rest are dyadic. */
1498 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1499 return 1;
1501 else
1503 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1505 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
1506 /* For MINUS, the constant is the minuend, since subtraction of
1507 a constant is always rewritten as addition of its negative. */
1508 if (code == MINUS)
1509 emit_insn (gen_rtx_SET (VOIDmode, target,
1510 gen_rtx_MINUS (mode, temp, source)));
1511 else
1512 emit_insn (gen_rtx_SET (VOIDmode, target,
1513 gen_rtx_fmt_ee (code, mode, source, temp)));
1514 return 2;
1519 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
1520 1);
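/* For example, SET of 0x1234 is not a valid rotated-8-bit immediate,
   so (subject to arm_constant_limit) it is synthesized in two
   instructions, e.g.

     mov rD, #0x1200
     orr rD, rD, #0x34

   while a costlier constant such as 0x12345678 (four insns) would
   instead be left to be loaded from the literal pool.  (rD is a
   stand-in register.)  */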
1523 static int
1524 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1526 HOST_WIDE_INT temp1;
1527 int num_insns = 0;
1528 do
1529 {
1530 int end;
1532 if (i <= 0)
1533 i += 32;
1534 if (remainder & (3 << (i - 2)))
1536 end = i - 8;
1537 if (end < 0)
1538 end += 32;
1539 temp1 = remainder & ((0x0ff << end)
1540 | ((i < end) ? (0xff >> (32 - end)) : 0));
1541 remainder &= ~temp1;
1542 num_insns++;
1543 i -= 6;
1545 i -= 2;
1546 } while (remainder);
1547 return num_insns;
1550 /* Emit an instruction with the indicated PATTERN. If COND is
1551 non-NULL, conditionalize the execution of the instruction on COND
1552 being true. */
1554 static void
1555 emit_constant_insn (rtx cond, rtx pattern)
1557 if (cond)
1558 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1559 emit_insn (pattern);
1562 /* As above, but extra parameter GENERATE which, if clear, suppresses
1563 RTL generation. */
1565 static int
1566 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1567 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1568 int generate)
1570 int can_invert = 0;
1571 int can_negate = 0;
1572 int can_negate_initial = 0;
1573 int can_shift = 0;
1574 int i;
1575 int num_bits_set = 0;
1576 int set_sign_bit_copies = 0;
1577 int clear_sign_bit_copies = 0;
1578 int clear_zero_bit_copies = 0;
1579 int set_zero_bit_copies = 0;
1580 int insns = 0;
1581 unsigned HOST_WIDE_INT temp1, temp2;
1582 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1584 /* Find out which operations are safe for a given CODE. Also do a quick
1585 check for degenerate cases; these can occur when DImode operations
1586 are split. */
1587 switch (code)
1589 case SET:
1590 can_invert = 1;
1591 can_shift = 1;
1592 can_negate = 1;
1593 break;
1595 case PLUS:
1596 can_negate = 1;
1597 can_negate_initial = 1;
1598 break;
1600 case IOR:
1601 if (remainder == 0xffffffff)
1603 if (generate)
1604 emit_constant_insn (cond,
1605 gen_rtx_SET (VOIDmode, target,
1606 GEN_INT (ARM_SIGN_EXTEND (val))));
1607 return 1;
1609 if (remainder == 0)
1611 if (reload_completed && rtx_equal_p (target, source))
1612 return 0;
1613 if (generate)
1614 emit_constant_insn (cond,
1615 gen_rtx_SET (VOIDmode, target, source));
1616 return 1;
1618 break;
1620 case AND:
1621 if (remainder == 0)
1623 if (generate)
1624 emit_constant_insn (cond,
1625 gen_rtx_SET (VOIDmode, target, const0_rtx));
1626 return 1;
1628 if (remainder == 0xffffffff)
1630 if (reload_completed && rtx_equal_p (target, source))
1631 return 0;
1632 if (generate)
1633 emit_constant_insn (cond,
1634 gen_rtx_SET (VOIDmode, target, source));
1635 return 1;
1637 can_invert = 1;
1638 break;
1640 case XOR:
1641 if (remainder == 0)
1643 if (reload_completed && rtx_equal_p (target, source))
1644 return 0;
1645 if (generate)
1646 emit_constant_insn (cond,
1647 gen_rtx_SET (VOIDmode, target, source));
1648 return 1;
1650 if (remainder == 0xffffffff)
1652 if (generate)
1653 emit_constant_insn (cond,
1654 gen_rtx_SET (VOIDmode, target,
1655 gen_rtx_NOT (mode, source)));
1656 return 1;
1659 /* We don't yet know how to handle any other XOR constant below. */
1660 abort ();
1662 case MINUS:
1663 /* We treat MINUS as (val - source), since (source - val) is always
1664 passed as (source + (-val)). */
1665 if (remainder == 0)
1667 if (generate)
1668 emit_constant_insn (cond,
1669 gen_rtx_SET (VOIDmode, target,
1670 gen_rtx_NEG (mode, source)));
1671 return 1;
1673 if (const_ok_for_arm (val))
1675 if (generate)
1676 emit_constant_insn (cond,
1677 gen_rtx_SET (VOIDmode, target,
1678 gen_rtx_MINUS (mode, GEN_INT (val),
1679 source)));
1680 return 1;
1682 can_negate = 1;
1684 break;
1686 default:
1687 abort ();
1690 /* If we can do it in one insn get out quickly. */
1691 if (const_ok_for_arm (val)
1692 || (can_negate_initial && const_ok_for_arm (-val))
1693 || (can_invert && const_ok_for_arm (~val)))
1695 if (generate)
1696 emit_constant_insn (cond,
1697 gen_rtx_SET (VOIDmode, target,
1698 (source
1699 ? gen_rtx_fmt_ee (code, mode, source,
1700 GEN_INT (val))
1701 : GEN_INT (val))));
1702 return 1;
1705 /* Calculate a few attributes that may be useful for specific
1706 optimizations. */
1707 for (i = 31; i >= 0; i--)
1709 if ((remainder & (1 << i)) == 0)
1710 clear_sign_bit_copies++;
1711 else
1712 break;
1715 for (i = 31; i >= 0; i--)
1717 if ((remainder & (1 << i)) != 0)
1718 set_sign_bit_copies++;
1719 else
1720 break;
1723 for (i = 0; i <= 31; i++)
1725 if ((remainder & (1 << i)) == 0)
1726 clear_zero_bit_copies++;
1727 else
1728 break;
1731 for (i = 0; i <= 31; i++)
1733 if ((remainder & (1 << i)) != 0)
1734 set_zero_bit_copies++;
1735 else
1736 break;
1739 switch (code)
1741 case SET:
1742 /* See if we can do this by sign_extending a constant that is known
1743 to be negative. This is a good way of doing it, since the shift
1744 may well merge into a subsequent insn. */
1745 if (set_sign_bit_copies > 1)
1747 if (const_ok_for_arm
1748 (temp1 = ARM_SIGN_EXTEND (remainder
1749 << (set_sign_bit_copies - 1))))
1751 if (generate)
1753 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1754 emit_constant_insn (cond,
1755 gen_rtx_SET (VOIDmode, new_src,
1756 GEN_INT (temp1)));
1757 emit_constant_insn (cond,
1758 gen_ashrsi3 (target, new_src,
1759 GEN_INT (set_sign_bit_copies - 1)));
1761 return 2;
1763 /* For an inverted constant, we will need to set the low bits;
1764 these will be shifted out of harm's way. */
1765 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1766 if (const_ok_for_arm (~temp1))
1768 if (generate)
1770 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1771 emit_constant_insn (cond,
1772 gen_rtx_SET (VOIDmode, new_src,
1773 GEN_INT (temp1)));
1774 emit_constant_insn (cond,
1775 gen_ashrsi3 (target, new_src,
1776 GEN_INT (set_sign_bit_copies - 1)));
1778 return 2;
1782 /* See if we can generate this by setting the bottom (or the top)
1783 16 bits, and then shifting these into the other half of the
1784 word. We only look for the simplest cases; to do more would cost
1785 too much. Be careful, however, not to generate this when the
1786 alternative would take fewer insns. */
1787 if (val & 0xffff0000)
1789 temp1 = remainder & 0xffff0000;
1790 temp2 = remainder & 0x0000ffff;
1792 /* Overlaps outside this range are best done using other methods. */
1793 for (i = 9; i < 24; i++)
1795 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1796 && !const_ok_for_arm (temp2))
1798 rtx new_src = (subtargets
1799 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1800 : target);
1801 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1802 source, subtargets, generate);
1803 source = new_src;
1804 if (generate)
1805 emit_constant_insn
1806 (cond,
1807 gen_rtx_SET
1808 (VOIDmode, target,
1809 gen_rtx_IOR (mode,
1810 gen_rtx_ASHIFT (mode, source,
1811 GEN_INT (i)),
1812 source)));
1813 return insns + 1;
1817 /* Don't duplicate cases already considered. */
1818 for (i = 17; i < 24; i++)
1820 if (((temp1 | (temp1 >> i)) == remainder)
1821 && !const_ok_for_arm (temp1))
1823 rtx new_src = (subtargets
1824 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1825 : target);
1826 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1827 source, subtargets, generate);
1828 source = new_src;
1829 if (generate)
1830 emit_constant_insn
1831 (cond,
1832 gen_rtx_SET (VOIDmode, target,
1833 gen_rtx_IOR
1834 (mode,
1835 gen_rtx_LSHIFTRT (mode, source,
1836 GEN_INT (i)),
1837 source)));
1838 return insns + 1;
1842 break;
1844 case IOR:
1845 case XOR:
1846 /* If we have IOR or XOR, and the constant can be loaded in a
1847 single instruction, and we can find a temporary to put it in,
1848 then this can be done in two instructions instead of 3-4. */
1849 if (subtargets
1850 /* TARGET can't be NULL if SUBTARGETS is 0. */
1851 || (reload_completed && !reg_mentioned_p (target, source)))
1853 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1855 if (generate)
1857 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1859 emit_constant_insn (cond,
1860 gen_rtx_SET (VOIDmode, sub,
1861 GEN_INT (val)));
1862 emit_constant_insn (cond,
1863 gen_rtx_SET (VOIDmode, target,
1864 gen_rtx_fmt_ee (code, mode,
1865 source, sub)));
1867 return 2;
1871 if (code == XOR)
1872 break;
1874 if (set_sign_bit_copies > 8
1875 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1877 if (generate)
1879 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1880 rtx shift = GEN_INT (set_sign_bit_copies);
1882 emit_constant_insn
1883 (cond,
1884 gen_rtx_SET (VOIDmode, sub,
1885 gen_rtx_NOT (mode,
1886 gen_rtx_ASHIFT (mode,
1887 source,
1888 shift))));
1889 emit_constant_insn
1890 (cond,
1891 gen_rtx_SET (VOIDmode, target,
1892 gen_rtx_NOT (mode,
1893 gen_rtx_LSHIFTRT (mode, sub,
1894 shift))));
1896 return 2;
1899 if (set_zero_bit_copies > 8
1900 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1902 if (generate)
1904 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1905 rtx shift = GEN_INT (set_zero_bit_copies);
1907 emit_constant_insn
1908 (cond,
1909 gen_rtx_SET (VOIDmode, sub,
1910 gen_rtx_NOT (mode,
1911 gen_rtx_LSHIFTRT (mode,
1912 source,
1913 shift))));
1914 emit_constant_insn
1915 (cond,
1916 gen_rtx_SET (VOIDmode, target,
1917 gen_rtx_NOT (mode,
1918 gen_rtx_ASHIFT (mode, sub,
1919 shift))));
1921 return 2;
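/* The three-instruction fallback below relies on De Morgan's law:
   x | c is computed as ~(~x & ~c).  When ~c is a valid immediate this
   takes something like an MVN, an AND and a final MVN (an illustrative
   sketch of the sequence, not a guaranteed schedule).  */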
1924 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1926 if (generate)
1928 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1929 emit_constant_insn (cond,
1930 gen_rtx_SET (VOIDmode, sub,
1931 gen_rtx_NOT (mode, source)));
1932 source = sub;
1933 if (subtargets)
1934 sub = gen_reg_rtx (mode);
1935 emit_constant_insn (cond,
1936 gen_rtx_SET (VOIDmode, sub,
1937 gen_rtx_AND (mode, source,
1938 GEN_INT (temp1))));
1939 emit_constant_insn (cond,
1940 gen_rtx_SET (VOIDmode, target,
1941 gen_rtx_NOT (mode, sub)));
1943 return 3;
1945 break;
1947 case AND:
1948 /* See if two shifts will do 2 or more insns' worth of work. */
1949 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1951 HOST_WIDE_INT shift_mask = ((0xffffffff
1952 << (32 - clear_sign_bit_copies))
1953 & 0xffffffff);
1955 if ((remainder | shift_mask) != 0xffffffff)
1957 if (generate)
1959 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1960 insns = arm_gen_constant (AND, mode, cond,
1961 remainder | shift_mask,
1962 new_src, source, subtargets, 1);
1963 source = new_src;
1965 else
1967 rtx targ = subtargets ? NULL_RTX : target;
1968 insns = arm_gen_constant (AND, mode, cond,
1969 remainder | shift_mask,
1970 targ, source, subtargets, 0);
1974 if (generate)
1976 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1977 rtx shift = GEN_INT (clear_sign_bit_copies);
1979 emit_insn (gen_ashlsi3 (new_src, source, shift));
1980 emit_insn (gen_lshrsi3 (target, new_src, shift));
1983 return insns + 2;
1986 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1988 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1990 if ((remainder | shift_mask) != 0xffffffff)
1992 if (generate)
1994 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1996 insns = arm_gen_constant (AND, mode, cond,
1997 remainder | shift_mask,
1998 new_src, source, subtargets, 1);
1999 source = new_src;
2001 else
2003 rtx targ = subtargets ? NULL_RTX : target;
2005 insns = arm_gen_constant (AND, mode, cond,
2006 remainder | shift_mask,
2007 targ, source, subtargets, 0);
2011 if (generate)
2013 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2014 rtx shift = GEN_INT (clear_zero_bit_copies);
2016 emit_insn (gen_lshrsi3 (new_src, source, shift));
2017 emit_insn (gen_ashlsi3 (target, new_src, shift));
2020 return insns + 2;
2023 break;
2025 default:
2026 break;
2029 for (i = 0; i < 32; i++)
2030 if (remainder & (1 << i))
2031 num_bits_set++;
2033 if (code == AND || (can_invert && num_bits_set > 16))
2034 remainder = (~remainder) & 0xffffffff;
2035 else if (code == PLUS && num_bits_set > 16)
2036 remainder = (-remainder) & 0xffffffff;
2037 else
2039 can_invert = 0;
2040 can_negate = 0;
2043 /* Now try and find a way of doing the job in either two or three
2044 instructions.
2045 We start by looking for the largest block of zeros that is aligned on
2046 a 2-bit boundary; we then fill up the temps, wrapping around to the
2047 top of the word when we drop off the bottom.
2048 In the worst case this code should produce no more than four insns. */
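/* For example, synthesizing the constant 0x00ff00ff from scratch comes
   out as two insns, roughly (register name illustrative):

	mov	rN, #0x00ff0000
	add	rN, rN, #0x000000ff

   since each 8-bit chunk is an immediate the ARM can encode directly.  */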
2050 int best_start = 0;
2051 int best_consecutive_zeros = 0;
2053 for (i = 0; i < 32; i += 2)
2055 int consecutive_zeros = 0;
2057 if (!(remainder & (3 << i)))
2059 while ((i < 32) && !(remainder & (3 << i)))
2061 consecutive_zeros += 2;
2062 i += 2;
2064 if (consecutive_zeros > best_consecutive_zeros)
2066 best_consecutive_zeros = consecutive_zeros;
2067 best_start = i - consecutive_zeros;
2069 i -= 2;
2073 /* So long as it won't require any more insns to do so, it's
2074 desirable to emit a small constant (in bits 0...9) in the last
2075 insn. This way there is more chance that it can be combined with
2076 a later addressing insn to form a pre-indexed load or store
2077 operation. Consider:
2079 *((volatile int *)0xe0000100) = 1;
2080 *((volatile int *)0xe0000110) = 2;
2082 We want this to wind up as:
2084 mov rA, #0xe0000000
2085 mov rB, #1
2086 str rB, [rA, #0x100]
2087 mov rB, #2
2088 str rB, [rA, #0x110]
2090 rather than having to synthesize both large constants from scratch.
2092 Therefore, we calculate how many insns would be required to emit
2093 the constant starting from `best_start', and also starting from
2094 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2095 yield a shorter sequence, we may as well use zero. */
2096 if (best_start != 0
2097 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2098 && (count_insns_for_constant (remainder, 0) <=
2099 count_insns_for_constant (remainder, best_start)))
2100 best_start = 0;
2102 /* Now start emitting the insns. */
2103 i = best_start;
2106 int end;
2108 if (i <= 0)
2109 i += 32;
2110 if (remainder & (3 << (i - 2)))
2112 end = i - 8;
2113 if (end < 0)
2114 end += 32;
2115 temp1 = remainder & ((0x0ff << end)
2116 | ((i < end) ? (0xff >> (32 - end)) : 0));
2117 remainder &= ~temp1;
2119 if (generate)
2121 rtx new_src, temp1_rtx;
2123 if (code == SET || code == MINUS)
2125 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2126 if (can_invert && code != MINUS)
2127 temp1 = ~temp1;
2129 else
2131 if (remainder && subtargets)
2132 new_src = gen_reg_rtx (mode);
2133 else
2134 new_src = target;
2135 if (can_invert)
2136 temp1 = ~temp1;
2137 else if (can_negate)
2138 temp1 = -temp1;
2141 temp1 = trunc_int_for_mode (temp1, mode);
2142 temp1_rtx = GEN_INT (temp1);
2144 if (code == SET)
2146 else if (code == MINUS)
2147 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2148 else
2149 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2151 emit_constant_insn (cond,
2152 gen_rtx_SET (VOIDmode, new_src,
2153 temp1_rtx));
2154 source = new_src;
2157 if (code == SET)
2159 can_invert = 0;
2160 code = PLUS;
2162 else if (code == MINUS)
2163 code = PLUS;
2165 insns++;
2166 i -= 6;
2168 i -= 2;
2170 while (remainder);
2173 return insns;
2176 /* Canonicalize a comparison so that we are more likely to recognize it.
2177 This can be done for a few constant compares, where we can make the
2178 immediate value easier to load. */
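/* For instance, (GT x 0xffff) would need the awkward constant 0xffff,
   but rewriting it as (GE x 0x10000) uses an immediate the ARM can
   encode directly in a single compare.  */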
2180 enum rtx_code
2181 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2183 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2185 switch (code)
2187 case EQ:
2188 case NE:
2189 return code;
2191 case GT:
2192 case LE:
2193 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2194 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2196 *op1 = GEN_INT (i + 1);
2197 return code == GT ? GE : LT;
2199 break;
2201 case GE:
2202 case LT:
2203 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2204 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2206 *op1 = GEN_INT (i - 1);
2207 return code == GE ? GT : LE;
2209 break;
2211 case GTU:
2212 case LEU:
2213 if (i != ~((unsigned HOST_WIDE_INT) 0)
2214 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2216 *op1 = GEN_INT (i + 1);
2217 return code == GTU ? GEU : LTU;
2219 break;
2221 case GEU:
2222 case LTU:
2223 if (i != 0
2224 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2226 *op1 = GEN_INT (i - 1);
2227 return code == GEU ? GTU : LEU;
2229 break;
2231 default:
2232 abort ();
2235 return code;
2239 /* Define how to find the value returned by a function. */
2241 rtx arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2243 enum machine_mode mode;
2244 int unsignedp ATTRIBUTE_UNUSED;
2245 rtx r ATTRIBUTE_UNUSED;
2248 mode = TYPE_MODE (type);
2249 /* Promote integer types. */
2250 if (INTEGRAL_TYPE_P (type))
2251 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2252 return LIBCALL_VALUE(mode);
2256 /* Decide whether a type should be returned in memory (true)
2257 or in a register (false). This is called by the macro
2258 RETURN_IN_MEMORY. */
2260 arm_return_in_memory (tree type)
2262 HOST_WIDE_INT size;
2264 if (!AGGREGATE_TYPE_P (type))
2265 /* All simple types are returned in registers. */
2266 return 0;
2268 size = int_size_in_bytes (type);
2270 if (arm_abi != ARM_ABI_APCS)
2272 /* ATPCS and later return aggregate types in memory only if they are
2273 larger than a word (or are variable size). */
2274 return (size < 0 || size > UNITS_PER_WORD);
2277 /* For the arm-wince targets we choose to be compatible with Microsoft's
2278 ARM and Thumb compilers, which always return aggregates in memory. */
2279 #ifndef ARM_WINCE
2280 /* All structures/unions bigger than one word are returned in memory.
2281 Also catch the case where int_size_in_bytes returns -1. In this case
2282 the aggregate is either huge or of variable size, and in either case
2283 we will want to return it via memory and not in a register. */
2284 if (size < 0 || size > UNITS_PER_WORD)
2285 return 1;
2287 if (TREE_CODE (type) == RECORD_TYPE)
2289 tree field;
2291 /* For a struct the APCS says that we only return in a register
2292 if the type is 'integer like' and every addressable element
2293 has an offset of zero. For practical purposes this means
2294 that the structure can have at most one non-bit-field element
2295 and that this element must be the first one in the structure. */
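/* So, for example, "struct { int i; }" can come back in r0, while
   "struct { float f; }" or "struct { short a; short b; }" must be
   returned in memory under the rules checked below.  */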
2297 /* Find the first field, ignoring non FIELD_DECL things which will
2298 have been created by C++. */
2299 for (field = TYPE_FIELDS (type);
2300 field && TREE_CODE (field) != FIELD_DECL;
2301 field = TREE_CHAIN (field))
2302 continue;
2304 if (field == NULL)
2305 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2307 /* Check that the first field is valid for returning in a register. */
2309 /* ... Floats are not allowed. */
2310 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2311 return 1;
2313 /* ... Aggregates that are not themselves valid for returning in
2314 a register are not allowed. */
2315 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2316 return 1;
2318 /* Now check the remaining fields, if any. Only bitfields are allowed,
2319 since they are not addressable. */
2320 for (field = TREE_CHAIN (field);
2321 field;
2322 field = TREE_CHAIN (field))
2324 if (TREE_CODE (field) != FIELD_DECL)
2325 continue;
2327 if (!DECL_BIT_FIELD_TYPE (field))
2328 return 1;
2331 return 0;
2334 if (TREE_CODE (type) == UNION_TYPE)
2336 tree field;
2338 /* Unions can be returned in registers if every element is
2339 integral, or can be returned in an integer register. */
2340 for (field = TYPE_FIELDS (type);
2341 field;
2342 field = TREE_CHAIN (field))
2344 if (TREE_CODE (field) != FIELD_DECL)
2345 continue;
2347 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2348 return 1;
2350 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2351 return 1;
2354 return 0;
2356 #endif /* not ARM_WINCE */
2358 /* Return all other types in memory. */
2359 return 1;
2362 /* Indicate whether or not words of a double are in big-endian order. */
2365 arm_float_words_big_endian (void)
2367 if (TARGET_MAVERICK)
2368 return 0;
2370 /* For FPA, float words are always big-endian. For VFP, floats words
2371 follow the memory system mode. */
2373 if (TARGET_FPA)
2375 return 1;
2378 if (TARGET_VFP)
2379 return (TARGET_BIG_END ? 1 : 0);
2381 return 1;
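/* Concretely, with FPA the double 1.0 is laid out as the word
   0x3ff00000 followed by 0x00000000 even on a little-endian target;
   only the bytes within each word follow the memory endianness.  */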
2384 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2385 for a call to a function whose data type is FNTYPE.
2386 For a library call, FNTYPE is NULL. */
2387 void
2388 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2389 rtx libname ATTRIBUTE_UNUSED,
2390 tree fndecl ATTRIBUTE_UNUSED)
2392 /* On the ARM, the offset starts at 0. */
2393 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2394 pcum->iwmmxt_nregs = 0;
2395 pcum->can_split = true;
2397 pcum->call_cookie = CALL_NORMAL;
2399 if (TARGET_LONG_CALLS)
2400 pcum->call_cookie = CALL_LONG;
2402 /* Check for long call/short call attributes. The attributes
2403 override any command line option. */
2404 if (fntype)
2406 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2407 pcum->call_cookie = CALL_SHORT;
2408 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2409 pcum->call_cookie = CALL_LONG;
2412 /* Varargs vectors are treated the same as long long.
2413 named_count avoids having to change the way arm handles 'named'. */
2414 pcum->named_count = 0;
2415 pcum->nargs = 0;
2417 if (TARGET_REALLY_IWMMXT && fntype)
2419 tree fn_arg;
2421 for (fn_arg = TYPE_ARG_TYPES (fntype);
2422 fn_arg;
2423 fn_arg = TREE_CHAIN (fn_arg))
2424 pcum->named_count += 1;
2426 if (! pcum->named_count)
2427 pcum->named_count = INT_MAX;
2432 /* Return true if mode/type need doubleword alignment. */
2433 bool
2434 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2436 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2437 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
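/* A "long long" argument, for example, has 64-bit alignment where
   PARM_BOUNDARY is 32, so when ARM_DOUBLEWORD_ALIGN holds it is placed
   in an even/odd core register pair such as (r0, r1) or (r2, r3),
   never straddling (r1, r2).  */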
2441 /* Determine where to put an argument to a function.
2442 Value is zero to push the argument on the stack,
2443 or a hard register in which to store the argument.
2445 MODE is the argument's machine mode.
2446 TYPE is the data type of the argument (as a tree).
2447 This is null for libcalls where that information may
2448 not be available.
2449 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2450 the preceding args and about the function being called.
2451 NAMED is nonzero if this argument is a named parameter
2452 (otherwise it is an extra parameter matching an ellipsis). */
2455 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2456 tree type, int named)
2458 int nregs;
2460 /* Varargs vectors are treated the same as long long.
2461 named_count avoids having to change the way arm handles 'named'. */
2462 if (TARGET_IWMMXT_ABI
2463 && VECTOR_MODE_SUPPORTED_P (mode)
2464 && pcum->named_count > pcum->nargs + 1)
2466 if (pcum->iwmmxt_nregs <= 9)
2467 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2468 else
2470 pcum->can_split = false;
2471 return NULL_RTX;
2475 /* Put doubleword aligned quantities in even register pairs. */
2476 if (pcum->nregs & 1
2477 && ARM_DOUBLEWORD_ALIGN
2478 && arm_needs_doubleword_align (mode, type))
2479 pcum->nregs++;
2481 if (mode == VOIDmode)
2482 /* Compute operand 2 of the call insn. */
2483 return GEN_INT (pcum->call_cookie);
2485 /* Only allow splitting an arg between regs and memory if all preceding
2486 args were allocated to regs. For args passed by reference we only count
2487 the reference pointer. */
2488 if (pcum->can_split)
2489 nregs = 1;
2490 else
2491 nregs = ARM_NUM_REGS2 (mode, type);
2493 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2494 return NULL_RTX;
2496 return gen_rtx_REG (mode, pcum->nregs);
2499 /* Variable sized types are passed by reference. This is a GCC
2500 extension to the ARM ABI. */
2502 static bool
2503 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2504 enum machine_mode mode ATTRIBUTE_UNUSED,
2505 tree type, bool named ATTRIBUTE_UNUSED)
2507 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
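/* This catches types whose size is not a compile-time constant, such
   as a GNU C structure ending in a variable-length array; the caller
   makes a copy and passes its address instead.  */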
2510 /* Encode the current state of the #pragma [no_]long_calls. */
2511 typedef enum
2513 OFF, /* No #pragma [no_]long_calls is in effect. */
2514 LONG, /* #pragma long_calls is in effect. */
2515 SHORT /* #pragma no_long_calls is in effect. */
2516 } arm_pragma_enum;
2518 static arm_pragma_enum arm_pragma_long_calls = OFF;
2520 void
2521 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2523 arm_pragma_long_calls = LONG;
2526 void
2527 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2529 arm_pragma_long_calls = SHORT;
2532 void
2533 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2535 arm_pragma_long_calls = OFF;
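/* A typical use of these pragmas, sketched:

     #pragma long_calls
     void far_away (void);	-- gets the long_call attribute
     #pragma long_calls_off
     void nearby (void);	-- default calling convention
*/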
2538 /* Table of machine attributes. */
2539 const struct attribute_spec arm_attribute_table[] =
2541 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2542 /* Function calls made to this symbol must be done indirectly, because
2543 it may lie outside of the 26 bit addressing range of a normal function
2544 call. */
2545 { "long_call", 0, 0, false, true, true, NULL },
2546 /* Whereas these functions are always known to reside within the 26 bit
2547 addressing range. */
2548 { "short_call", 0, 0, false, true, true, NULL },
2549 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2550 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2551 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2552 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2553 #ifdef ARM_PE
2554 /* ARM/PE has three new attributes:
2555 interfacearm - ?
2556 dllexport - for exporting a function/variable that will live in a dll
2557 dllimport - for importing a function/variable from a dll
2559 Microsoft allows multiple declspecs in one __declspec, separating
2560 them with spaces. We do NOT support this. Instead, use __declspec
2561 multiple times.
2563 { "dllimport", 0, 0, true, false, false, NULL },
2564 { "dllexport", 0, 0, true, false, false, NULL },
2565 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2566 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2567 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2568 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2569 #endif
2570 { NULL, 0, 0, false, false, false, NULL }
2573 /* Handle an attribute requiring a FUNCTION_DECL;
2574 arguments as in struct attribute_spec.handler. */
2575 static tree
2576 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2577 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2579 if (TREE_CODE (*node) != FUNCTION_DECL)
2581 warning ("`%s' attribute only applies to functions",
2582 IDENTIFIER_POINTER (name));
2583 *no_add_attrs = true;
2586 return NULL_TREE;
2589 /* Handle an "interrupt" or "isr" attribute;
2590 arguments as in struct attribute_spec.handler. */
2591 static tree
2592 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2593 bool *no_add_attrs)
2595 if (DECL_P (*node))
2597 if (TREE_CODE (*node) != FUNCTION_DECL)
2599 warning ("`%s' attribute only applies to functions",
2600 IDENTIFIER_POINTER (name));
2601 *no_add_attrs = true;
2603 /* FIXME: the argument if any is checked for type attributes;
2604 should it be checked for decl ones? */
2606 else
2608 if (TREE_CODE (*node) == FUNCTION_TYPE
2609 || TREE_CODE (*node) == METHOD_TYPE)
2611 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2613 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2614 *no_add_attrs = true;
2617 else if (TREE_CODE (*node) == POINTER_TYPE
2618 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2619 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2620 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2622 *node = build_type_copy (*node);
2623 TREE_TYPE (*node) = build_type_attribute_variant
2624 (TREE_TYPE (*node),
2625 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2626 *no_add_attrs = true;
2628 else
2630 /* Possibly pass this attribute on from the type to a decl. */
2631 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2632 | (int) ATTR_FLAG_FUNCTION_NEXT
2633 | (int) ATTR_FLAG_ARRAY_NEXT))
2635 *no_add_attrs = true;
2636 return tree_cons (name, args, NULL_TREE);
2638 else
2640 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2645 return NULL_TREE;
2648 /* Return 0 if the attributes for two types are incompatible, 1 if they
2649 are compatible, and 2 if they are nearly compatible (which causes a
2650 warning to be generated). */
2651 static int
2652 arm_comp_type_attributes (tree type1, tree type2)
2654 int l1, l2, s1, s2;
2656 /* Check for mismatch of non-default calling convention. */
2657 if (TREE_CODE (type1) != FUNCTION_TYPE)
2658 return 1;
2660 /* Check for mismatched call attributes. */
2661 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2662 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2663 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2664 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2666 /* Only bother to check if an attribute is defined. */
2667 if (l1 | l2 | s1 | s2)
2669 /* If one type has an attribute, the other must have the same attribute. */
2670 if ((l1 != l2) || (s1 != s2))
2671 return 0;
2673 /* Disallow mixed attributes. */
2674 if ((l1 & s2) || (l2 & s1))
2675 return 0;
2678 /* Check for mismatched ISR attribute. */
2679 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2680 if (! l1)
2681 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2682 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2683 if (! l2)
2684 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2685 if (l1 != l2)
2686 return 0;
2688 return 1;
2691 /* Encode long_call or short_call attribute by prefixing
2692 symbol name in DECL with a special character FLAG. */
2693 void
2694 arm_encode_call_attribute (tree decl, int flag)
2696 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2697 int len = strlen (str);
2698 char * newstr;
2700 /* Do not allow weak functions to be treated as short call. */
2701 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2702 return;
2704 newstr = alloca (len + 2);
2705 newstr[0] = flag;
2706 strcpy (newstr + 1, str);
2708 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2709 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2712 /* Assigns default attributes to newly defined type. This is used to
2713 set short_call/long_call attributes for function types of
2714 functions defined inside corresponding #pragma scopes. */
2715 static void
2716 arm_set_default_type_attributes (tree type)
2718 /* Add __attribute__ ((long_call)) to all functions, when
2719 inside #pragma long_calls or __attribute__ ((short_call)),
2720 when inside #pragma no_long_calls. */
2721 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2723 tree type_attr_list, attr_name;
2724 type_attr_list = TYPE_ATTRIBUTES (type);
2726 if (arm_pragma_long_calls == LONG)
2727 attr_name = get_identifier ("long_call");
2728 else if (arm_pragma_long_calls == SHORT)
2729 attr_name = get_identifier ("short_call");
2730 else
2731 return;
2733 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2734 TYPE_ATTRIBUTES (type) = type_attr_list;
2738 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2739 defined within the current compilation unit. If this cannot be
2740 determined, then 0 is returned. */
2741 static int
2742 current_file_function_operand (rtx sym_ref)
2744 /* This is a bit of a fib. A function will have a short call flag
2745 applied to its name if it has the short_call attribute, or if it has
2746 already been defined within the current compilation unit. */
2747 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2748 return 1;
2750 /* The current function is always defined within the current compilation
2751 unit. If it is a weak definition, however, then this may not be the real
2752 definition of the function, and so we have to say no. */
2753 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2754 && !DECL_WEAK (current_function_decl))
2755 return 1;
2757 /* We cannot make the determination - default to returning 0. */
2758 return 0;
2761 /* Return nonzero if a 32 bit "long_call" should be generated for
2762 this call. We generate a long_call if the function:
2764 a. has an __attribute__ ((long_call))
2765 or b. is within the scope of a #pragma long_calls
2766 or c. the -mlong-calls command line switch has been specified
2767 and either:
2768 1. -ffunction-sections is in effect
2769 or 2. the current function has __attribute__ ((section))
2770 or 3. the target function has __attribute__ ((section))
2772 However we do not generate a long call if the function:
2774 d. has an __attribute__ ((short_call))
2775 or e. is inside the scope of a #pragma no_long_calls
2776 or f. is defined within the current compilation unit.
2778 This function will be called by C fragments contained in the machine
2779 description file. SYM_REF and CALL_COOKIE correspond to the matched
2780 rtl operands. CALL_SYMBOL is used to distinguish between
2781 two different callers of the function. It is set to 1 in the
2782 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2783 and "call_value" patterns. This is because of the difference in the
2784 SYM_REFs passed by these patterns. */
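/* As an illustration, a declaration such as

     extern void monitor (void) __attribute__ ((long_call));

   makes calls to monitor load the full 32-bit address into a register
   and branch indirectly, instead of using a 26-bit pc-relative BL.  */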
2786 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2788 if (!call_symbol)
2790 if (GET_CODE (sym_ref) != MEM)
2791 return 0;
2793 sym_ref = XEXP (sym_ref, 0);
2796 if (GET_CODE (sym_ref) != SYMBOL_REF)
2797 return 0;
2799 if (call_cookie & CALL_SHORT)
2800 return 0;
2802 if (TARGET_LONG_CALLS)
2804 if (flag_function_sections
2805 || DECL_SECTION_NAME (current_function_decl))
2806 /* c.3 is handled by the definition of the
2807 ARM_DECLARE_FUNCTION_SIZE macro. */
2808 return 1;
2811 if (current_file_function_operand (sym_ref))
2812 return 0;
2814 return (call_cookie & CALL_LONG)
2815 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2816 || TARGET_LONG_CALLS;
2819 /* Return nonzero if it is ok to make a tail-call to DECL. */
2820 static bool
2821 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2823 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2825 if (cfun->machine->sibcall_blocked)
2826 return false;
2828 /* Never tailcall something for which we have no decl, or if we
2829 are in Thumb mode. */
2830 if (decl == NULL || TARGET_THUMB)
2831 return false;
2833 /* Get the calling method. */
2834 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2835 call_type = CALL_SHORT;
2836 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2837 call_type = CALL_LONG;
2839 /* Cannot tail-call to long calls, since these are out of range of
2840 a branch instruction. However, if not compiling PIC, we know
2841 we can reach the symbol if it is in this compilation unit. */
2842 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2843 return false;
2845 /* If we are interworking and the function is not declared static
2846 then we can't tail-call it unless we know that it exists in this
2847 compilation unit (since it might be a Thumb routine). */
2848 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2849 return false;
2851 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2852 if (IS_INTERRUPT (arm_current_func_type ()))
2853 return false;
2855 /* Everything else is ok. */
2856 return true;
2860 /* Addressing mode support functions. */
2862 /* Return nonzero if X is a legitimate immediate operand when compiling
2863 for PIC. */
2865 legitimate_pic_operand_p (rtx x)
2867 if (CONSTANT_P (x)
2868 && flag_pic
2869 && (GET_CODE (x) == SYMBOL_REF
2870 || (GET_CODE (x) == CONST
2871 && GET_CODE (XEXP (x, 0)) == PLUS
2872 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2873 return 0;
2875 return 1;
2879 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2881 if (GET_CODE (orig) == SYMBOL_REF
2882 || GET_CODE (orig) == LABEL_REF)
2884 #ifndef AOF_ASSEMBLER
2885 rtx pic_ref, address;
2886 #endif
2887 rtx insn;
2888 int subregs = 0;
2890 if (reg == 0)
2892 if (no_new_pseudos)
2893 abort ();
2894 else
2895 reg = gen_reg_rtx (Pmode);
2897 subregs = 1;
2900 #ifdef AOF_ASSEMBLER
2901 /* The AOF assembler can generate relocations for these directly, and
2902 understands that the PIC register has to be added into the offset. */
2903 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2904 #else
2905 if (subregs)
2906 address = gen_reg_rtx (Pmode);
2907 else
2908 address = reg;
2910 if (TARGET_ARM)
2911 emit_insn (gen_pic_load_addr_arm (address, orig));
2912 else
2913 emit_insn (gen_pic_load_addr_thumb (address, orig));
2915 if ((GET_CODE (orig) == LABEL_REF
2916 || (GET_CODE (orig) == SYMBOL_REF &&
2917 SYMBOL_REF_LOCAL_P (orig)))
2918 && NEED_GOT_RELOC)
2919 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2920 else
2922 pic_ref = gen_rtx_MEM (Pmode,
2923 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2924 address));
2925 RTX_UNCHANGING_P (pic_ref) = 1;
2928 insn = emit_move_insn (reg, pic_ref);
2929 #endif
2930 current_function_uses_pic_offset_table = 1;
2931 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2932 by the loop optimizer. */
2933 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2934 REG_NOTES (insn));
2935 return reg;
2937 else if (GET_CODE (orig) == CONST)
2939 rtx base, offset;
2941 if (GET_CODE (XEXP (orig, 0)) == PLUS
2942 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2943 return orig;
2945 if (reg == 0)
2947 if (no_new_pseudos)
2948 abort ();
2949 else
2950 reg = gen_reg_rtx (Pmode);
2953 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2955 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2956 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2957 base == reg ? 0 : reg);
2959 else
2960 abort ();
2962 if (GET_CODE (offset) == CONST_INT)
2964 /* The base register doesn't really matter; we only want to
2965 test the index for the appropriate mode. */
2966 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2968 if (!no_new_pseudos)
2969 offset = force_reg (Pmode, offset);
2970 else
2971 abort ();
2974 if (GET_CODE (offset) == CONST_INT)
2975 return plus_constant (base, INTVAL (offset));
2978 if (GET_MODE_SIZE (mode) > 4
2979 && (GET_MODE_CLASS (mode) == MODE_INT
2980 || TARGET_SOFT_FLOAT))
2982 emit_insn (gen_addsi3 (reg, base, offset));
2983 return reg;
2986 return gen_rtx_PLUS (Pmode, base, offset);
2989 return orig;
2992 /* Generate code to load the PIC register. PROLOGUE is true if
2993 called from arm_expand_prologue (in which case we want the
2994 generated insns at the start of the function); false if called
2995 by an exception receiver that needs the PIC register reloaded
2996 (in which case the insns are just dumped at the current location). */
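/* The ARM-state code emitted here amounts to something like
   (register and label names illustrative):

	ldr	sl, L2
   L1:	add	sl, pc, sl	@ pc reads as L1 + 8 here
	...
   L2:	.word	_GLOBAL_OFFSET_TABLE_ - (L1 + 8)

   so that after the add, sl holds the address of the GOT.  */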
2997 void
2998 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
3000 #ifndef AOF_ASSEMBLER
3001 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
3002 rtx global_offset_table;
3004 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3005 return;
3007 if (!flag_pic)
3008 abort ();
3010 start_sequence ();
3011 l1 = gen_label_rtx ();
3013 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3014 /* On the ARM the PC register contains 'dot + 8' at the time of the
3015 addition, on the Thumb it is 'dot + 4'. */
3016 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3017 if (GOT_PCREL)
3018 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3019 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3020 else
3021 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3023 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3025 if (TARGET_ARM)
3027 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3028 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3030 else
3032 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3033 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3036 seq = get_insns ();
3037 end_sequence ();
3038 if (prologue)
3039 emit_insn_after (seq, get_insns ());
3040 else
3041 emit_insn (seq);
3043 /* Need to emit this whether or not we obey regdecls,
3044 since setjmp/longjmp can cause life info to screw up. */
3045 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3046 #endif /* AOF_ASSEMBLER */
3049 /* Return nonzero if X is valid as an ARM state addressing register. */
3050 static int
3051 arm_address_register_rtx_p (rtx x, int strict_p)
3053 int regno;
3055 if (GET_CODE (x) != REG)
3056 return 0;
3058 regno = REGNO (x);
3060 if (strict_p)
3061 return ARM_REGNO_OK_FOR_BASE_P (regno);
3063 return (regno <= LAST_ARM_REGNUM
3064 || regno >= FIRST_PSEUDO_REGISTER
3065 || regno == FRAME_POINTER_REGNUM
3066 || regno == ARG_POINTER_REGNUM);
3069 /* Return nonzero if X is a valid ARM state address operand. */
3071 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3072 int strict_p)
3074 bool use_ldrd;
3075 enum rtx_code code = GET_CODE (x);
3077 if (arm_address_register_rtx_p (x, strict_p))
3078 return 1;
3080 use_ldrd = (TARGET_LDRD
3081 && (mode == DImode
3082 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3084 if (code == POST_INC || code == PRE_DEC
3085 || ((code == PRE_INC || code == POST_DEC)
3086 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3087 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3089 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3090 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3091 && GET_CODE (XEXP (x, 1)) == PLUS
3092 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3094 rtx addend = XEXP (XEXP (x, 1), 1);
3096 /* Don't allow ldrd post-increment by register because it's hard
3097 to fix up invalid register choices. */
3098 if (use_ldrd
3099 && GET_CODE (x) == POST_MODIFY
3100 && GET_CODE (addend) == REG)
3101 return 0;
3103 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3104 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3107 /* After reload constants split into minipools will have addresses
3108 from a LABEL_REF. */
3109 else if (reload_completed
3110 && (code == LABEL_REF
3111 || (code == CONST
3112 && GET_CODE (XEXP (x, 0)) == PLUS
3113 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3114 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3115 return 1;
3117 else if (mode == TImode)
3118 return 0;
3120 else if (code == PLUS)
3122 rtx xop0 = XEXP (x, 0);
3123 rtx xop1 = XEXP (x, 1);
3125 return ((arm_address_register_rtx_p (xop0, strict_p)
3126 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3127 || (arm_address_register_rtx_p (xop1, strict_p)
3128 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3131 #if 0
3132 /* Reload currently can't handle MINUS, so disable this for now */
3133 else if (GET_CODE (x) == MINUS)
3135 rtx xop0 = XEXP (x, 0);
3136 rtx xop1 = XEXP (x, 1);
3138 return (arm_address_register_rtx_p (xop0, strict_p)
3139 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3141 #endif
3143 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3144 && code == SYMBOL_REF
3145 && CONSTANT_POOL_ADDRESS_P (x)
3146 && ! (flag_pic
3147 && symbol_mentioned_p (get_pool_constant (x))))
3148 return 1;
3150 return 0;
3153 /* Return nonzero if INDEX is valid for an address index operand in
3154 ARM state. */
3155 static int
3156 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3157 int strict_p)
3159 HOST_WIDE_INT range;
3160 enum rtx_code code = GET_CODE (index);
3162 /* Standard coprocessor addressing modes. */
3163 if (TARGET_HARD_FLOAT
3164 && (TARGET_FPA || TARGET_MAVERICK)
3165 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3166 || (TARGET_MAVERICK && mode == DImode)))
3167 return (code == CONST_INT && INTVAL (index) < 1024
3168 && INTVAL (index) > -1024
3169 && (INTVAL (index) & 3) == 0);
3171 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3172 return (code == CONST_INT
3173 && INTVAL (index) < 1024
3174 && INTVAL (index) > -1024
3175 && (INTVAL (index) & 3) == 0);
3177 if (arm_address_register_rtx_p (index, strict_p)
3178 && (GET_MODE_SIZE (mode) <= 4))
3179 return 1;
3181 if (mode == DImode || mode == DFmode)
3183 if (code == CONST_INT)
3185 HOST_WIDE_INT val = INTVAL (index);
3187 if (TARGET_LDRD)
3188 return val > -256 && val < 256;
3189 else
3190 return val > -4096 && val < 4092;
3193 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3196 if (GET_MODE_SIZE (mode) <= 4
3197 && ! (arm_arch4
3198 && (mode == HImode
3199 || (mode == QImode && outer == SIGN_EXTEND))))
3201 if (code == MULT)
3203 rtx xiop0 = XEXP (index, 0);
3204 rtx xiop1 = XEXP (index, 1);
3206 return ((arm_address_register_rtx_p (xiop0, strict_p)
3207 && power_of_two_operand (xiop1, SImode))
3208 || (arm_address_register_rtx_p (xiop1, strict_p)
3209 && power_of_two_operand (xiop0, SImode)));
3211 else if (code == LSHIFTRT || code == ASHIFTRT
3212 || code == ASHIFT || code == ROTATERT)
3214 rtx op = XEXP (index, 1);
3216 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3217 && GET_CODE (op) == CONST_INT
3218 && INTVAL (op) > 0
3219 && INTVAL (op) <= 31);
3223 /* For ARM v4 we may be doing a sign-extend operation during the
3224 load. */
3225 if (arm_arch4)
3227 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3228 range = 256;
3229 else
3230 range = 4096;
3232 else
3233 range = (mode == HImode) ? 4095 : 4096;
3235 return (code == CONST_INT
3236 && INTVAL (index) < range
3237 && INTVAL (index) > -range);
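/* So, for instance, a word load may use offsets up to +/-4095
   ("ldr r0, [r1, #4095]"), while an ARMv4 halfword load is limited
   to +/-255 ("ldrh r0, [r1, #255]").  */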
3240 /* Return nonzero if X is valid as a Thumb state base register. */
3241 static int
3242 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3244 int regno;
3246 if (GET_CODE (x) != REG)
3247 return 0;
3249 regno = REGNO (x);
3251 if (strict_p)
3252 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3254 return (regno <= LAST_LO_REGNUM
3255 || regno > LAST_VIRTUAL_REGISTER
3256 || regno == FRAME_POINTER_REGNUM
3257 || (GET_MODE_SIZE (mode) >= 4
3258 && (regno == STACK_POINTER_REGNUM
3259 || regno >= FIRST_PSEUDO_REGISTER
3260 || x == hard_frame_pointer_rtx
3261 || x == arg_pointer_rtx)));
3264 /* Return nonzero if x is a legitimate index register. This is the case
3265 for any base register that can access a QImode object. */
3266 inline static int
3267 thumb_index_register_rtx_p (rtx x, int strict_p)
3269 return thumb_base_register_rtx_p (x, QImode, strict_p);
3272 /* Return nonzero if x is a legitimate Thumb-state address.
3274 The AP may be eliminated to either the SP or the FP, so we use the
3275 least common denominator, e.g. SImode, and offsets from 0 to 64.
3277 ??? Verify whether the above is the right approach.
3279 ??? Also, the FP may be eliminated to the SP, so perhaps that
3280 needs special handling also.
3282 ??? Look at how the mips16 port solves this problem. It probably uses
3283 better ways to solve some of these problems.
3285 Although it is not incorrect, we don't accept QImode and HImode
3286 addresses based on the frame pointer or arg pointer until the
3287 reload pass starts. This is so that eliminating such addresses
3288 into stack based ones won't produce impossible code. */
3290 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3292 /* ??? Not clear if this is right. Experiment. */
3293 if (GET_MODE_SIZE (mode) < 4
3294 && !(reload_in_progress || reload_completed)
3295 && (reg_mentioned_p (frame_pointer_rtx, x)
3296 || reg_mentioned_p (arg_pointer_rtx, x)
3297 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3298 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3299 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3300 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3301 return 0;
3303 /* Accept any base register. SP only in SImode or larger. */
3304 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3305 return 1;
3307 /* This is PC relative data before arm_reorg runs. */
3308 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3309 && GET_CODE (x) == SYMBOL_REF
3310 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3311 return 1;
3313 /* This is PC relative data after arm_reorg runs. */
3314 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3315 && (GET_CODE (x) == LABEL_REF
3316 || (GET_CODE (x) == CONST
3317 && GET_CODE (XEXP (x, 0)) == PLUS
3318 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3319 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3320 return 1;
3322 /* Post-inc indexing only supported for SImode and larger. */
3323 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3324 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3325 return 1;
3327 else if (GET_CODE (x) == PLUS)
3329 /* REG+REG address can be any two index registers. */
3330 /* We disallow FRAME+REG addressing since we know that FRAME
3331 will be replaced with STACK, and SP relative addressing only
3332 permits SP+OFFSET. */
3333 if (GET_MODE_SIZE (mode) <= 4
3334 && XEXP (x, 0) != frame_pointer_rtx
3335 && XEXP (x, 1) != frame_pointer_rtx
3336 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3337 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3338 return 1;
3340 /* REG+const has 5-7 bit offset for non-SP registers. */
3341 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3342 || XEXP (x, 0) == arg_pointer_rtx)
3343 && GET_CODE (XEXP (x, 1)) == CONST_INT
3344 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3345 return 1;
3347 /* REG+const has 10 bit offset for SP, but only SImode and
3348 larger is supported. */
3349 /* ??? Should probably check for DI/DFmode overflow here
3350 just like GO_IF_LEGITIMATE_OFFSET does. */
3351 else if (GET_CODE (XEXP (x, 0)) == REG
3352 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3353 && GET_MODE_SIZE (mode) >= 4
3354 && GET_CODE (XEXP (x, 1)) == CONST_INT
3355 && INTVAL (XEXP (x, 1)) >= 0
3356 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3357 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3358 return 1;
3360 else if (GET_CODE (XEXP (x, 0)) == REG
3361 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3362 && GET_MODE_SIZE (mode) >= 4
3363 && GET_CODE (XEXP (x, 1)) == CONST_INT
3364 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3365 return 1;
3368 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3369 && GET_MODE_SIZE (mode) == 4
3370 && GET_CODE (x) == SYMBOL_REF
3371 && CONSTANT_POOL_ADDRESS_P (x)
3372 && !(flag_pic
3373 && symbol_mentioned_p (get_pool_constant (x))))
3374 return 1;
3376 return 0;
3379 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3380 instruction of mode MODE. */
3382 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3384 switch (GET_MODE_SIZE (mode))
3386 case 1:
3387 return val >= 0 && val < 32;
3389 case 2:
3390 return val >= 0 && val < 64 && (val & 1) == 0;
3392 default:
3393 return (val >= 0
3394 && (val + GET_MODE_SIZE (mode)) <= 128
3395 && (val & 3) == 0);
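/* These bounds mirror the Thumb 5-bit scaled immediate: for example
   "ldrb r0, [r1, #31]" and "ldr r0, [r1, #124]" use the largest
   accepted offsets, while "ldrh r0, [r1, #63]" is rejected because
   halfword offsets must be even.  */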
3399 /* Try machine-dependent ways of modifying an illegitimate address
3400 to be legitimate. If we find one, return the new, valid address. */
3402 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3404 if (GET_CODE (x) == PLUS)
3406 rtx xop0 = XEXP (x, 0);
3407 rtx xop1 = XEXP (x, 1);
3409 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3410 xop0 = force_reg (SImode, xop0);
3412 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3413 xop1 = force_reg (SImode, xop1);
3415 if (ARM_BASE_REGISTER_RTX_P (xop0)
3416 && GET_CODE (xop1) == CONST_INT)
3418 HOST_WIDE_INT n, low_n;
3419 rtx base_reg, val;
3420 n = INTVAL (xop1);
3422 /* VFP addressing modes actually allow greater offsets, but for
3423 now we just stick with the lowest common denominator. */
3424 if (mode == DImode
3425 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3427 low_n = n & 0x0f;
3428 n &= ~0x0f;
3429 if (low_n > 4)
3431 n += 16;
3432 low_n -= 16;
3435 else
3437 low_n = ((mode) == TImode ? 0
3438 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3439 n -= low_n;
3442 base_reg = gen_reg_rtx (SImode);
3443 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3444 GEN_INT (n)), NULL_RTX);
3445 emit_move_insn (base_reg, val);
3446 x = (low_n == 0 ? base_reg
3447 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3449 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3450 x = gen_rtx_PLUS (SImode, xop0, xop1);
3453 /* XXX We don't allow MINUS any more -- see comment in
3454 arm_legitimate_address_p (). */
3455 else if (GET_CODE (x) == MINUS)
3457 rtx xop0 = XEXP (x, 0);
3458 rtx xop1 = XEXP (x, 1);
3460 if (CONSTANT_P (xop0))
3461 xop0 = force_reg (SImode, xop0);
3463 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3464 xop1 = force_reg (SImode, xop1);
3466 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3467 x = gen_rtx_MINUS (SImode, xop0, xop1);
3470 if (flag_pic)
3472 /* We need to find and carefully transform any SYMBOL and LABEL
3473 references; so go back to the original address expression. */
3474 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3476 if (new_x != orig_x)
3477 x = new_x;
3480 return x;
3484 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3485 to be legitimate. If we find one, return the new, valid address. */
3487 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3489 if (GET_CODE (x) == PLUS
3490 && GET_CODE (XEXP (x, 1)) == CONST_INT
3491 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3492 || INTVAL (XEXP (x, 1)) < 0))
3494 rtx xop0 = XEXP (x, 0);
3495 rtx xop1 = XEXP (x, 1);
3496 HOST_WIDE_INT offset = INTVAL (xop1);
3498 /* Try to fold the offset into a biasing of the base register and
3499 then offsetting that. Don't do this when optimizing for space
3500 since it can cause too many CSEs. */
3501 if (optimize_size && offset >= 0
3502 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3504 HOST_WIDE_INT delta;
3506 if (offset >= 256)
3507 delta = offset - (256 - GET_MODE_SIZE (mode));
3508 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3509 delta = 31 * GET_MODE_SIZE (mode);
3510 else
3511 delta = offset & (~31 * GET_MODE_SIZE (mode));
3513 xop0 = force_operand (plus_constant (xop0, offset - delta),
3514 NULL_RTX);
3515 x = plus_constant (xop0, delta);
3517 else if (offset < 0 && offset > -256)
3518 /* Small negative offsets are best done with a subtract before the
3519 dereference; forcing these into a register normally takes two
3520 instructions. */
3521 x = force_operand (x, NULL_RTX);
3522 else
3524 /* For the remaining cases, force the constant into a register. */
3525 xop1 = force_reg (SImode, xop1);
3526 x = gen_rtx_PLUS (SImode, xop0, xop1);
3529 else if (GET_CODE (x) == PLUS
3530 && s_register_operand (XEXP (x, 1), SImode)
3531 && !s_register_operand (XEXP (x, 0), SImode))
3533 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3535 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3538 if (flag_pic)
3540 /* We need to find and carefully transform any SYMBOL and LABEL
3541 references; so go back to the original address expression. */
3542 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3544 if (new_x != orig_x)
3545 x = new_x;
3548 return x;
3553 #define REG_OR_SUBREG_REG(X) \
3554 (GET_CODE (X) == REG \
3555 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3557 #define REG_OR_SUBREG_RTX(X) \
3558 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3560 #ifndef COSTS_N_INSNS
3561 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3562 #endif
3563 static inline int
3564 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3566 enum machine_mode mode = GET_MODE (x);
3568 switch (code)
3570 case ASHIFT:
3571 case ASHIFTRT:
3572 case LSHIFTRT:
3573 case ROTATERT:
3574 case PLUS:
3575 case MINUS:
3576 case COMPARE:
3577 case NEG:
3578 case NOT:
3579 return COSTS_N_INSNS (1);
3581 case MULT:
3582 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3584 int cycles = 0;
3585 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
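/* Rough model: the Thumb multiplier retires two bits of the constant
   per cycle, so count one cycle per pair of significant bits.  */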
3587 while (i)
3589 i >>= 2;
3590 cycles++;
3592 return COSTS_N_INSNS (2) + cycles;
3594 return COSTS_N_INSNS (1) + 16;
3596 case SET:
3597 return (COSTS_N_INSNS (1)
3598 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3599 + (GET_CODE (SET_DEST (x)) == MEM)));
3601 case CONST_INT:
3602 if (outer == SET)
3604 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3605 return 0;
3606 if (thumb_shiftable_const (INTVAL (x)))
3607 return COSTS_N_INSNS (2);
3608 return COSTS_N_INSNS (3);
3610 else if ((outer == PLUS || outer == COMPARE)
3611 && INTVAL (x) < 256 && INTVAL (x) > -256)
3612 return 0;
3613 else if (outer == AND
3614 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3615 return COSTS_N_INSNS (1);
3616 else if (outer == ASHIFT || outer == ASHIFTRT
3617 || outer == LSHIFTRT)
3618 return 0;
3619 return COSTS_N_INSNS (2);
3621 case CONST:
3622 case CONST_DOUBLE:
3623 case LABEL_REF:
3624 case SYMBOL_REF:
3625 return COSTS_N_INSNS (3);
3627 case UDIV:
3628 case UMOD:
3629 case DIV:
3630 case MOD:
3631 return 100;
3633 case TRUNCATE:
3634 return 99;
3636 case AND:
3637 case XOR:
3638 case IOR:
3639 /* XXX guess. */
3640 return 8;
3642 case MEM:
3643 /* XXX another guess. */
3644 /* Memory costs quite a lot for the first word, but subsequent words
3645 load at the equivalent of a single insn each. */
3646 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3647 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3648 ? 4 : 0));
3650 case IF_THEN_ELSE:
3651 /* XXX a guess. */
3652 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3653 return 14;
3654 return 2;
3656 case ZERO_EXTEND:
3657 /* XXX still guessing. */
3658 switch (GET_MODE (XEXP (x, 0)))
3660 case QImode:
3661 return (1 + (mode == DImode ? 4 : 0)
3662 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3664 case HImode:
3665 return (4 + (mode == DImode ? 4 : 0)
3666 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3668 case SImode:
3669 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3671 default:
3672 return 99;
3675 default:
3676 return 99;
3681 /* Worker routine for arm_rtx_costs. */
3682 static inline int
3683 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3685 enum machine_mode mode = GET_MODE (x);
3686 enum rtx_code subcode;
3687 int extra_cost;
3689 switch (code)
3691 case MEM:
3692 /* Memory costs quite a lot for the first word, but subsequent words
3693 load at the equivalent of a single insn each. */
3694 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3695 + (GET_CODE (x) == SYMBOL_REF
3696 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3698 case DIV:
3699 case MOD:
3700 case UDIV:
3701 case UMOD:
3702 return optimize_size ? COSTS_N_INSNS (2) : 100;
3704 case ROTATE:
3705 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3706 return 4;
3707 /* Fall through */
3708 case ROTATERT:
3709 if (mode != SImode)
3710 return 8;
3711 /* Fall through */
3712 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3713 if (mode == DImode)
3714 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3715 + ((GET_CODE (XEXP (x, 0)) == REG
3716 || (GET_CODE (XEXP (x, 0)) == SUBREG
3717 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3718 ? 0 : 8));
3719 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3720 || (GET_CODE (XEXP (x, 0)) == SUBREG
3721 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3722 ? 0 : 4)
3723 + ((GET_CODE (XEXP (x, 1)) == REG
3724 || (GET_CODE (XEXP (x, 1)) == SUBREG
3725 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3726 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3727 ? 0 : 4));
3729 case MINUS:
3730 if (mode == DImode)
3731 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3732 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3733 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3734 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3735 ? 0 : 8));
3737 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3738 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3739 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3740 && arm_const_double_rtx (XEXP (x, 1))))
3741 ? 0 : 8)
3742 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3743 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3744 && arm_const_double_rtx (XEXP (x, 0))))
3745 ? 0 : 8));
3747 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3748 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3749 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3750 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3751 || subcode == ASHIFTRT || subcode == LSHIFTRT
3752 || subcode == ROTATE || subcode == ROTATERT
3753 || (subcode == MULT
3754 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3755 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3756 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3757 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3758 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3759 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3760 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3761 return 1;
3762 /* Fall through */
3764 case PLUS:
3765 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3766 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3767 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3768 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3769 && arm_const_double_rtx (XEXP (x, 1))))
3770 ? 0 : 8));
3772 /* Fall through */
3773 case AND: case XOR: case IOR:
3774 extra_cost = 0;
3776 /* Normally the frame registers will be split into reg+const during
3777 reload, so it is a bad idea to combine them with other instructions,
3778 since then they might not be moved outside of loops. As a compromise
3779 we allow integration with ops that have a constant as their second
3780 operand. */
3781 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3782 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3783 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3784 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3785 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3786 extra_cost = 4;
3788 if (mode == DImode)
3789 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3790 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3791 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3792 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3793 ? 0 : 8));
3795 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3796 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3797 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3798 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3799 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3800 ? 0 : 4));
3802 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3803 return (1 + extra_cost
3804 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3805 || subcode == LSHIFTRT || subcode == ASHIFTRT
3806 || subcode == ROTATE || subcode == ROTATERT
3807 || (subcode == MULT
3808 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3809 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3810 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3811 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3812 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3813 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3814 ? 0 : 4));
3816 return 8;
3818 case MULT:
3819 /* This should have been handled by the CPU specific routines. */
3820 abort ();
3822 case TRUNCATE:
3823 if (arm_arch3m && mode == SImode
3824 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3825 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3826 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3827 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3828 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3829 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3830 return 8;
3831 return 99;
3833 case NEG:
3834 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3835 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3836 /* Fall through */
3837 case NOT:
3838 if (mode == DImode)
3839 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3841 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3843 case IF_THEN_ELSE:
3844 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3845 return 14;
3846 return 2;
3848 case COMPARE:
3849 return 1;
3851 case ABS:
3852 return 4 + (mode == DImode ? 4 : 0);
3854 case SIGN_EXTEND:
3855 if (GET_MODE (XEXP (x, 0)) == QImode)
3856 return (4 + (mode == DImode ? 4 : 0)
3857 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3858 /* Fall through */
3859 case ZERO_EXTEND:
3860 switch (GET_MODE (XEXP (x, 0)))
3862 case QImode:
3863 return (1 + (mode == DImode ? 4 : 0)
3864 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3866 case HImode:
3867 return (4 + (mode == DImode ? 4 : 0)
3868 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3870 case SImode:
3871 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3873 case V8QImode:
3874 case V4HImode:
3875 case V2SImode:
3876 case V4QImode:
3877 case V2HImode:
3878 return 1;
3880 default:
3881 break;
3883 abort ();
3885 case CONST_INT:
3886 if (const_ok_for_arm (INTVAL (x)))
3887 return outer == SET ? 2 : -1;
3888 else if (outer == AND
3889 && const_ok_for_arm (~INTVAL (x)))
3890 return -1;
3891 else if ((outer == COMPARE
3892 || outer == PLUS || outer == MINUS)
3893 && const_ok_for_arm (-INTVAL (x)))
3894 return -1;
3895 else
3896 return 5;
3898 case CONST:
3899 case LABEL_REF:
3900 case SYMBOL_REF:
3901 return 6;
3903 case CONST_DOUBLE:
3904 if (arm_const_double_rtx (x))
3905 return outer == SET ? 2 : -1;
3906 else if ((outer == COMPARE || outer == PLUS)
3907 && neg_const_double_rtx_ok_for_fpa (x))
3908 return -1;
3909 return 7;
3911 default:
3912 return 99;
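/* Worked examples for the CONST_INT case above (ours, for illustration;
   they assume the usual ARM immediate rule of an 8-bit value rotated
   right by an even amount):

     (set (reg) (const_int 255))      -> 2   valid immediate, outer SET
     (and (reg) (const_int -256))     -> -1  ~(-256) == 255 is valid
     (plus (reg) (const_int -500))    -> -1  500 == 125 << 2 is valid
     (xor (reg) (const_int 0x12345))  -> 5   needs a constant-build
                                             sequence  */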
3916 /* RTX costs for cores with a slow MUL implementation. */
3918 static bool
3919 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3921 enum machine_mode mode = GET_MODE (x);
3923 if (TARGET_THUMB)
3925 *total = thumb_rtx_costs (x, code, outer_code);
3926 return true;
3929 switch (code)
3931 case MULT:
3932 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3933 || mode == DImode)
3935 *total = 30;
3936 return true;
3939 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3941 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3942 & (unsigned HOST_WIDE_INT) 0xffffffff);
3943 int cost, const_ok = const_ok_for_arm (i);
3944 int j, booth_unit_size;
3946 /* Tune as appropriate. */
3947 cost = const_ok ? 4 : 8;
3948 booth_unit_size = 2;
3949 for (j = 0; i && j < 32; j += booth_unit_size)
3951 i >>= booth_unit_size;
3952 cost += 2;
3955 *total = cost;
3956 return true;
3959 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3960 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3961 return true;
3963 default:
3964 *total = arm_rtx_costs_1 (x, code, outer_code);
3965 return true;
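/* A worked trace of the Booth-style loop above (illustrative, not from
   the original source): for the valid immediate 0x24 (binary 100100) and
   booth_unit_size == 2, the constant is consumed in three 2-bit steps
   (0x24 -> 0x9 -> 0x2 -> 0), giving *total = 4 + 3 * 2 = 10.  A constant
   that is not const_ok_for_arm starts from a cost of 8 instead of 4.  */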
3970 /* RTX cost for cores with a fast multiply unit (M variants). */
3972 static bool
3973 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3975 enum machine_mode mode = GET_MODE (x);
3977 if (TARGET_THUMB)
3979 *total = thumb_rtx_costs (x, code, outer_code);
3980 return true;
3983 switch (code)
3985 case MULT:
3986 /* There is no point basing this on the tuning, since it is always the
3987 fast variant if it exists at all. */
3988 if (mode == DImode
3989 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3990 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3991 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3993 *total = 8;
3994 return true;
3998 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3999 || mode == DImode)
4001 *total = 30;
4002 return true;
4005 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4007 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4008 & (unsigned HOST_WIDE_INT) 0xffffffff);
4009 int cost, const_ok = const_ok_for_arm (i);
4010 int j, booth_unit_size;
4012 /* Tune as appropriate. */
4013 cost = const_ok ? 4 : 8;
4014 booth_unit_size = 8;
4015 for (j = 0; i && j < 32; j += booth_unit_size)
4017 i >>= booth_unit_size;
4018 cost += 2;
4021 *total = cost;
4022 return true;
4025 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4026 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4027 return true;
4029 default:
4030 *total = arm_rtx_costs_1 (x, code, outer_code);
4031 return true;
4036 /* RTX cost for XScale CPUs. */
4038 static bool
4039 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4041 enum machine_mode mode = GET_MODE (x);
4043 if (TARGET_THUMB)
4045 *total = thumb_rtx_costs (x, code, outer_code);
4046 return true;
4049 switch (code)
4051 case MULT:
4052 /* There is no point basing this on the tuning, since it is always the
4053 fast variant if it exists at all. */
4054 if (mode == DImode
4055 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4056 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4057 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4059 *total = 8;
4060 return true;
4064 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4065 || mode == DImode)
4067 *total = 30;
4068 return true;
4071 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4073 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4074 & (unsigned HOST_WIDE_INT) 0xffffffff);
4075 int cost, const_ok = const_ok_for_arm (i);
4076 unsigned HOST_WIDE_INT masked_const;
4078 /* The cost will be related to two insns.
4079 First a load of the constant (MOV or LDR), then a multiply. */
4080 cost = 2;
4081 if (! const_ok)
4082 cost += 1; /* LDR is probably more expensive because
4083 of longer result latency. */
4084 masked_const = i & 0xffff8000;
4085 if (masked_const != 0 && masked_const != 0xffff8000)
4087 masked_const = i & 0xf8000000;
4088 if (masked_const == 0 || masked_const == 0xf8000000)
4089 cost += 1;
4090 else
4091 cost += 2;
4093 *total = cost;
4094 return true;
4097 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4098 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4099 return true;
4101 default:
4102 *total = arm_rtx_costs_1 (x, code, outer_code);
4103 return true;
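/* Our reading of the masked_const tests above (a gloss, not from the
   original source): i & 0xffff8000 is 0 or 0xffff8000 exactly when the
   constant sign-extends from 16 bits, in which case the multiply needs
   no extra cycles.  Failing that, if i & 0xf8000000 is 0 or 0xf8000000
   the constant still sign-extends from 28 bits and costs one extra
   cycle; anything wider costs two.  For example, i == 0x1234 adds 0,
   i == 0x123400 adds 1 and i == 0x12345678 adds 2.  */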
4108 /* RTX costs for 9e (and later) cores. */
4110 static bool
4111 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4113 enum machine_mode mode = GET_MODE (x);
4114 int nonreg_cost;
4115 int cost;
4117 if (TARGET_THUMB)
4119 switch (code)
4121 case MULT:
4122 *total = COSTS_N_INSNS (3);
4123 return true;
4125 default:
4126 *total = thumb_rtx_costs (x, code, outer_code);
4127 return true;
4131 switch (code)
4133 case MULT:
4134 /* There is no point basing this on the tuning, since it is always the
4135 fast variant if it exists at all. */
4136 if (mode == DImode
4137 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4138 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4139 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4141 *total = 3;
4142 return true;
4146 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4148 *total = 30;
4149 return true;
4151 if (mode == DImode)
4153 cost = 7;
4154 nonreg_cost = 8;
4156 else
4158 cost = 2;
4159 nonreg_cost = 4;
4163 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4164 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4165 return true;
4167 default:
4168 *total = arm_rtx_costs_1 (x, code, outer_code);
4169 return true;
4172 /* All address computations that can be done are free, but rtx cost returns
4173 the same for practically all of them. So we weight the different types
4174 of address here in the order (most pref first):
4175 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4176 static inline int
4177 arm_arm_address_cost (rtx x)
4179 enum rtx_code c = GET_CODE (x);
4181 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4182 return 0;
4183 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4184 return 10;
4186 if (c == PLUS || c == MINUS)
4188 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4189 return 2;
4191 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4192 return 3;
4194 return 4;
4197 return 6;
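/* Illustrative weightings produced by arm_arm_address_cost (ours, not
   from the original source):

     (post_inc (reg r3))                    -> 0
     (plus (const_int 4) (reg r3))          -> 2   (note that the function
                                                    tests operand 0 for
                                                    the constant)
     (plus (mult (reg r3) (...)) (reg r4))  -> 3
     (plus (reg r3) (reg r4))               -> 4
     (reg r3)                               -> 6
     (symbol_ref "x")                       -> 10  */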
4200 static inline int
4201 arm_thumb_address_cost (rtx x)
4203 enum rtx_code c = GET_CODE (x);
4205 if (c == REG)
4206 return 1;
4207 if (c == PLUS
4208 && GET_CODE (XEXP (x, 0)) == REG
4209 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4210 return 1;
4212 return 2;
4215 static int
4216 arm_address_cost (rtx x)
4218 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4221 static int
4222 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4224 rtx i_pat, d_pat;
4226 /* Some true dependencies can have a higher cost depending
4227 on precisely how certain input operands are used. */
4228 if (arm_tune_xscale
4229 && REG_NOTE_KIND (link) == 0
4230 && recog_memoized (insn) >= 0
4231 && recog_memoized (dep) >= 0)
4233 int shift_opnum = get_attr_shift (insn);
4234 enum attr_type attr_type = get_attr_type (dep);
4236 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4237 operand for INSN. If we have a shifted input operand and the
4238 instruction we depend on is another ALU instruction, then we may
4239 have to account for an additional stall. */
4240 if (shift_opnum != 0
4241 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4243 rtx shifted_operand;
4244 int opno;
4246 /* Get the shifted operand. */
4247 extract_insn (insn);
4248 shifted_operand = recog_data.operand[shift_opnum];
4250 /* Iterate over all the operands in DEP. If we write an operand
4251 that overlaps with SHIFTED_OPERAND, then we have to increase the
4252 cost of this dependency. */
4253 extract_insn (dep);
4254 preprocess_constraints ();
4255 for (opno = 0; opno < recog_data.n_operands; opno++)
4257 /* We can ignore strict inputs. */
4258 if (recog_data.operand_type[opno] == OP_IN)
4259 continue;
4261 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4262 shifted_operand))
4263 return 2;
4268 /* XXX This is not strictly true for the FPA. */
4269 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4270 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4271 return 0;
4273 /* Call insns don't incur a stall, even if they follow a load. */
4274 if (REG_NOTE_KIND (link) == 0
4275 && GET_CODE (insn) == CALL_INSN)
4276 return 1;
4278 if ((i_pat = single_set (insn)) != NULL
4279 && GET_CODE (SET_SRC (i_pat)) == MEM
4280 && (d_pat = single_set (dep)) != NULL
4281 && GET_CODE (SET_DEST (d_pat)) == MEM)
4283 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4284 /* This is a load after a store; there is no conflict if the load reads
4285 from a cached area. Assume that loads from the stack, and from the
4286 constant pool are cached, and that others will miss. This is a
4287 hack. */
4289 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4290 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4291 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4292 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4293 return 1;
4296 return cost;
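/* Summarizing the adjustments above (our gloss): anti- and output
   dependencies are free; a call insn costs 1 even after a load; and a
   load issued after a store costs only 1 when its address mentions the
   stack or frame pointers or the constant pool (assumed cached), while
   any other load-after-store keeps the scheduler-supplied COST.  */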
4299 static int fp_consts_inited = 0;
4301 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4302 static const char * const strings_fp[8] =
4304 "0", "1", "2", "3",
4305 "4", "5", "0.5", "10"
4308 static REAL_VALUE_TYPE values_fp[8];
4310 static void
4311 init_fp_table (void)
4313 int i;
4314 REAL_VALUE_TYPE r;
4316 if (TARGET_VFP)
4317 fp_consts_inited = 1;
4318 else
4319 fp_consts_inited = 8;
4321 for (i = 0; i < fp_consts_inited; i++)
4323 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4324 values_fp[i] = r;
4328 /* Return TRUE if rtx X is a valid immediate FP constant. */
4330 arm_const_double_rtx (rtx x)
4332 REAL_VALUE_TYPE r;
4333 int i;
4335 if (!fp_consts_inited)
4336 init_fp_table ();
4338 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4339 if (REAL_VALUE_MINUS_ZERO (r))
4340 return 0;
4342 for (i = 0; i < fp_consts_inited; i++)
4343 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4344 return 1;
4346 return 0;
4349 /* Return TRUE if rtx X, when negated, is a valid immediate FPA constant. */
4351 neg_const_double_rtx_ok_for_fpa (rtx x)
4353 REAL_VALUE_TYPE r;
4354 int i;
4356 if (!fp_consts_inited)
4357 init_fp_table ();
4359 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4360 r = REAL_VALUE_NEGATE (r);
4361 if (REAL_VALUE_MINUS_ZERO (r))
4362 return 0;
4364 for (i = 0; i < 8; i++)
4365 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4366 return 1;
4368 return 0;
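/* Illustrative uses of the two predicates above: for FPA the table holds
   the literals 0, 1, 2, 3, 4, 5, 0.5 and 10, so arm_const_double_rtx
   accepts a CONST_DOUBLE of 2.0, and neg_const_double_rtx_ok_for_fpa
   accepts -3.0 (its negation, 3.0, is in the table).  Under TARGET_VFP
   only the entry for 0 is initialized, so only zero matches.  */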
4371 /* Predicates for `match_operand' and `match_operator'. */
4373 /* s_register_operand is the same as register_operand, but it doesn't accept
4374 (SUBREG (MEM)...).
4376 This function exists because at the time it was put in it led to better
4377 code. SUBREG(MEM) always needs a reload in the places where
4378 s_register_operand is used, and this seemed to lead to excessive
4379 reloading. */
4381 s_register_operand (rtx op, enum machine_mode mode)
4383 if (GET_MODE (op) != mode && mode != VOIDmode)
4384 return 0;
4386 if (GET_CODE (op) == SUBREG)
4387 op = SUBREG_REG (op);
4389 /* We don't consider registers whose class is NO_REGS
4390 to be a register operand. */
4391 /* XXX might have to check for lo regs only for thumb ??? */
4392 return (GET_CODE (op) == REG
4393 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4394 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4397 /* A hard register operand (even before reload). */
4399 arm_hard_register_operand (rtx op, enum machine_mode mode)
4401 if (GET_MODE (op) != mode && mode != VOIDmode)
4402 return 0;
4404 return (GET_CODE (op) == REG
4405 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4408 /* An arm register operand. */
4410 arm_general_register_operand (rtx op, enum machine_mode mode)
4412 if (GET_MODE (op) != mode && mode != VOIDmode)
4413 return 0;
4415 if (GET_CODE (op) == SUBREG)
4416 op = SUBREG_REG (op);
4418 return (GET_CODE (op) == REG
4419 && (REGNO (op) <= LAST_ARM_REGNUM
4420 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4423 /* Only accept reg, subreg(reg), const_int. */
4425 reg_or_int_operand (rtx op, enum machine_mode mode)
4427 if (GET_CODE (op) == CONST_INT)
4428 return 1;
4430 if (GET_MODE (op) != mode && mode != VOIDmode)
4431 return 0;
4433 if (GET_CODE (op) == SUBREG)
4434 op = SUBREG_REG (op);
4436 /* We don't consider registers whose class is NO_REGS
4437 to be a register operand. */
4438 return (GET_CODE (op) == REG
4439 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4440 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4443 /* Return 1 if OP is an item in memory, given that we are in reload. */
4445 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4447 int regno = true_regnum (op);
4449 return (!CONSTANT_P (op)
4450 && (regno == -1
4451 || (GET_CODE (op) == REG
4452 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4455 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4457 arm_rhs_operand (rtx op, enum machine_mode mode)
4459 return (s_register_operand (op, mode)
4460 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4463 /* Return TRUE for valid operands for the
4464 rhs of an ARM instruction, or a load. */
4466 arm_rhsm_operand (rtx op, enum machine_mode mode)
4468 return (s_register_operand (op, mode)
4469 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4470 || memory_operand (op, mode));
4473 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4474 constant that is valid when negated. */
4476 arm_add_operand (rtx op, enum machine_mode mode)
4478 if (TARGET_THUMB)
4479 return thumb_cmp_operand (op, mode);
4481 return (s_register_operand (op, mode)
4482 || (GET_CODE (op) == CONST_INT
4483 && (const_ok_for_arm (INTVAL (op))
4484 || const_ok_for_arm (-INTVAL (op)))));
4487 /* Return TRUE for valid ARM constants (or when valid if negated). */
4489 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4491 return (GET_CODE (op) == CONST_INT
4492 && (const_ok_for_arm (INTVAL (op))
4493 || const_ok_for_arm (-INTVAL (op))));
4497 arm_not_operand (rtx op, enum machine_mode mode)
4499 return (s_register_operand (op, mode)
4500 || (GET_CODE (op) == CONST_INT
4501 && (const_ok_for_arm (INTVAL (op))
4502 || const_ok_for_arm (~INTVAL (op)))));
4505 /* Return TRUE if the operand is a memory reference which contains an
4506 offsettable address. */
4508 offsettable_memory_operand (rtx op, enum machine_mode mode)
4510 if (mode == VOIDmode)
4511 mode = GET_MODE (op);
4513 return (mode == GET_MODE (op)
4514 && GET_CODE (op) == MEM
4515 && offsettable_address_p (reload_completed | reload_in_progress,
4516 mode, XEXP (op, 0)));
4519 /* Return TRUE if the operand is a memory reference which is, or can be
4520 made word aligned by adjusting the offset. */
4522 alignable_memory_operand (rtx op, enum machine_mode mode)
4524 rtx reg;
4526 if (mode == VOIDmode)
4527 mode = GET_MODE (op);
4529 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4530 return 0;
4532 op = XEXP (op, 0);
4534 return ((GET_CODE (reg = op) == REG
4535 || (GET_CODE (op) == SUBREG
4536 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4537 || (GET_CODE (op) == PLUS
4538 && GET_CODE (XEXP (op, 1)) == CONST_INT
4539 && (GET_CODE (reg = XEXP (op, 0)) == REG
4540 || (GET_CODE (XEXP (op, 0)) == SUBREG
4541 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4542 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4545 /* Similar to s_register_operand, but does not allow hard integer
4546 registers. */
4548 f_register_operand (rtx op, enum machine_mode mode)
4550 if (GET_MODE (op) != mode && mode != VOIDmode)
4551 return 0;
4553 if (GET_CODE (op) == SUBREG)
4554 op = SUBREG_REG (op);
4556 /* Only accept FPA registers, or pseudos that might be allocated
4557 to them. */
4558 return (GET_CODE (op) == REG
4559 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4560 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4563 /* Return TRUE for valid operands for the rhs of a floating point insn.
4564 Allows regs or certain consts on FPA, just regs for everything else. */
4566 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4568 if (s_register_operand (op, mode))
4569 return TRUE;
4571 if (GET_MODE (op) != mode && mode != VOIDmode)
4572 return FALSE;
4574 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4575 return arm_const_double_rtx (op);
4577 return FALSE;
4581 arm_float_add_operand (rtx op, enum machine_mode mode)
4583 if (s_register_operand (op, mode))
4584 return TRUE;
4586 if (GET_MODE (op) != mode && mode != VOIDmode)
4587 return FALSE;
4589 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4590 return (arm_const_double_rtx (op)
4591 || neg_const_double_rtx_ok_for_fpa (op));
4593 return FALSE;
4597 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4598 Depends on which FPU we are targeting. */
4601 arm_float_compare_operand (rtx op, enum machine_mode mode)
4603 if (TARGET_VFP)
4604 return vfp_compare_operand (op, mode);
4605 else
4606 return arm_float_rhs_operand (op, mode);
4610 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4612 cirrus_memory_offset (rtx op)
4614 /* Reject eliminable registers. */
4615 if (! (reload_in_progress || reload_completed)
4616 && ( reg_mentioned_p (frame_pointer_rtx, op)
4617 || reg_mentioned_p (arg_pointer_rtx, op)
4618 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4619 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4620 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4621 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4622 return 0;
4624 if (GET_CODE (op) == MEM)
4626 rtx ind;
4628 ind = XEXP (op, 0);
4630 /* Match: (mem (reg)). */
4631 if (GET_CODE (ind) == REG)
4632 return 1;
4634 /* Match:
4635 (mem (plus (reg)
4636 (const))). */
4637 if (GET_CODE (ind) == PLUS
4638 && GET_CODE (XEXP (ind, 0)) == REG
4639 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4640 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4641 return 1;
4644 return 0;
4648 arm_extendqisi_mem_op (rtx op, enum machine_mode mode)
4650 if (!memory_operand (op, mode))
4651 return 0;
4653 return arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, 0);
4656 /* Return nonzero if OP is a Cirrus or general register. */
4658 cirrus_register_operand (rtx op, enum machine_mode mode)
4660 if (GET_MODE (op) != mode && mode != VOIDmode)
4661 return FALSE;
4663 if (GET_CODE (op) == SUBREG)
4664 op = SUBREG_REG (op);
4666 return (GET_CODE (op) == REG
4667 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4668 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4671 /* Return nonzero if OP is a Cirrus FP register. */
4673 cirrus_fp_register (rtx op, enum machine_mode mode)
4675 if (GET_MODE (op) != mode && mode != VOIDmode)
4676 return FALSE;
4678 if (GET_CODE (op) == SUBREG)
4679 op = SUBREG_REG (op);
4681 return (GET_CODE (op) == REG
4682 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4683 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4686 /* Return nonzero if OP is a 6-bit constant (0..63). */
4688 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4690 return (GET_CODE (op) == CONST_INT
4691 && INTVAL (op) >= 0
4692 && INTVAL (op) < 64);
4696 /* Return TRUE if OP is a valid VFP memory address pattern.
4697 WB is true if writeback address modes are allowed. */
4700 arm_coproc_mem_operand (rtx op, bool wb)
4702 rtx ind;
4704 /* Reject eliminable registers. */
4705 if (! (reload_in_progress || reload_completed)
4706 && ( reg_mentioned_p (frame_pointer_rtx, op)
4707 || reg_mentioned_p (arg_pointer_rtx, op)
4708 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4709 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4710 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4711 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4712 return FALSE;
4714 /* Constants are converted into offsets from labels. */
4715 if (GET_CODE (op) != MEM)
4716 return FALSE;
4718 ind = XEXP (op, 0);
4720 if (reload_completed
4721 && (GET_CODE (ind) == LABEL_REF
4722 || (GET_CODE (ind) == CONST
4723 && GET_CODE (XEXP (ind, 0)) == PLUS
4724 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4725 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4726 return TRUE;
4728 /* Match: (mem (reg)). */
4729 if (GET_CODE (ind) == REG)
4730 return arm_address_register_rtx_p (ind, 0);
4732 /* Auto-increment addressing modes. */
4733 if (wb
4734 && (GET_CODE (ind) == PRE_INC
4735 || GET_CODE (ind) == POST_INC
4736 || GET_CODE (ind) == PRE_DEC
4737 || GET_CODE (ind) == POST_DEC))
4738 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4740 if (wb
4741 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4742 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4743 && GET_CODE (XEXP (ind, 1)) == PLUS
4744 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4745 ind = XEXP (ind, 1);
4747 /* Match:
4748 (plus (reg)
4749 (const)). */
4750 if (GET_CODE (ind) == PLUS
4751 && GET_CODE (XEXP (ind, 0)) == REG
4752 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4753 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4754 && INTVAL (XEXP (ind, 1)) > -1024
4755 && INTVAL (XEXP (ind, 1)) < 1024
4756 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4757 return TRUE;
4759 return FALSE;
4763 /* Return TRUE if OP is a REG or constant zero. */
4765 vfp_compare_operand (rtx op, enum machine_mode mode)
4767 if (s_register_operand (op, mode))
4768 return TRUE;
4770 return (GET_CODE (op) == CONST_DOUBLE
4771 && arm_const_double_rtx (op));
4775 /* Return GENERAL_REGS if a scratch register is required to reload X to/from
4776 VFP registers. Otherwise return NO_REGS. */
4778 enum reg_class
4779 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4781 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4782 return NO_REGS;
4784 return GENERAL_REGS;
4788 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4789 Used by the Cirrus Maverick code, which has to work around
4790 a hardware bug triggered by such instructions. */
4791 static bool
4792 arm_memory_load_p (rtx insn)
4794 rtx body, lhs, rhs;
4796 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4797 return false;
4799 body = PATTERN (insn);
4801 if (GET_CODE (body) != SET)
4802 return false;
4804 lhs = XEXP (body, 0);
4805 rhs = XEXP (body, 1);
4807 lhs = REG_OR_SUBREG_RTX (lhs);
4809 /* If the destination is not a general purpose
4810 register we do not have to worry. */
4811 if (GET_CODE (lhs) != REG
4812 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4813 return false;
4815 /* As well as loads from memory we also have to react
4816 to loads of invalid constants which will be turned
4817 into loads from the minipool. */
4818 return (GET_CODE (rhs) == MEM
4819 || GET_CODE (rhs) == SYMBOL_REF
4820 || note_invalid_constants (insn, -1, false));
4823 /* Return TRUE if INSN is a Cirrus instruction. */
4824 static bool
4825 arm_cirrus_insn_p (rtx insn)
4827 enum attr_cirrus attr;
4829 /* get_attr aborts on USE and CLOBBER. */
4830 if (!insn
4831 || GET_CODE (insn) != INSN
4832 || GET_CODE (PATTERN (insn)) == USE
4833 || GET_CODE (PATTERN (insn)) == CLOBBER)
4834 return 0;
4836 attr = get_attr_cirrus (insn);
4838 return attr != CIRRUS_NOT;
4841 /* Cirrus reorg for invalid instruction combinations. */
4842 static void
4843 cirrus_reorg (rtx first)
4845 enum attr_cirrus attr;
4846 rtx body = PATTERN (first);
4847 rtx t;
4848 int nops;
4850 /* Any branch must be followed by 2 non Cirrus instructions. */
4851 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4853 nops = 0;
4854 t = next_nonnote_insn (first);
4856 if (arm_cirrus_insn_p (t))
4857 ++ nops;
4859 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4860 ++ nops;
4862 while (nops --)
4863 emit_insn_after (gen_nop (), first);
4865 return;
4868 /* (float (blah)) is in parallel with a clobber. */
4869 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4870 body = XVECEXP (body, 0, 0);
4872 if (GET_CODE (body) == SET)
4874 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4876 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4877 be followed by a non Cirrus insn. */
4878 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4880 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4881 emit_insn_after (gen_nop (), first);
4883 return;
4885 else if (arm_memory_load_p (first))
4887 unsigned int arm_regno;
4889 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4890 ldr/cfmv64hr combination where the Rd field is the same
4891 in both instructions must be split with a non Cirrus
4892 insn. Example:
4894 ldr r0, blah
4896 cfmvsr mvf0, r0. */
4898 /* Get Arm register number for ldr insn. */
4899 if (GET_CODE (lhs) == REG)
4900 arm_regno = REGNO (lhs);
4901 else if (GET_CODE (rhs) == REG)
4902 arm_regno = REGNO (rhs);
4903 else
4904 abort ();
4906 /* Next insn. */
4907 first = next_nonnote_insn (first);
4909 if (! arm_cirrus_insn_p (first))
4910 return;
4912 body = PATTERN (first);
4914 /* (float (blah)) is in parallel with a clobber. */
4915 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4916 body = XVECEXP (body, 0, 0);
4918 if (GET_CODE (body) == FLOAT)
4919 body = XEXP (body, 0);
4921 if (get_attr_cirrus (first) == CIRRUS_MOVE
4922 && GET_CODE (XEXP (body, 1)) == REG
4923 && arm_regno == REGNO (XEXP (body, 1)))
4924 emit_insn_after (gen_nop (), first);
4926 return;
4930 /* get_attr aborts on USE and CLOBBER. */
4931 if (!first
4932 || GET_CODE (first) != INSN
4933 || GET_CODE (PATTERN (first)) == USE
4934 || GET_CODE (PATTERN (first)) == CLOBBER)
4935 return;
4937 attr = get_attr_cirrus (first);
4939 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4940 must be followed by a non-coprocessor instruction. */
4941 if (attr == CIRRUS_COMPARE)
4943 nops = 0;
4945 t = next_nonnote_insn (first);
4947 if (arm_cirrus_insn_p (t))
4948 ++ nops;
4950 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4951 ++ nops;
4953 while (nops --)
4954 emit_insn_after (gen_nop (), first);
4956 return;
4960 /* Return nonzero if OP is a constant power of two. */
4962 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4964 if (GET_CODE (op) == CONST_INT)
4966 HOST_WIDE_INT value = INTVAL (op);
4968 return value != 0 && (value & (value - 1)) == 0;
4971 return FALSE;
4974 /* Return TRUE for a valid operand of a DImode operation.
4975 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4976 Note that this disallows MEM(REG+REG), but allows
4977 MEM(PRE/POST_INC/DEC(REG)). */
4979 di_operand (rtx op, enum machine_mode mode)
4981 if (s_register_operand (op, mode))
4982 return TRUE;
4984 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4985 return FALSE;
4987 if (GET_CODE (op) == SUBREG)
4988 op = SUBREG_REG (op);
4990 switch (GET_CODE (op))
4992 case CONST_DOUBLE:
4993 case CONST_INT:
4994 return TRUE;
4996 case MEM:
4997 return memory_address_p (DImode, XEXP (op, 0));
4999 default:
5000 return FALSE;
5004 /* Like di_operand, but don't accept constants. */
5006 nonimmediate_di_operand (rtx op, enum machine_mode mode)
5008 if (s_register_operand (op, mode))
5009 return TRUE;
5011 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
5012 return FALSE;
5014 if (GET_CODE (op) == SUBREG)
5015 op = SUBREG_REG (op);
5017 if (GET_CODE (op) == MEM)
5018 return memory_address_p (DImode, XEXP (op, 0));
5020 return FALSE;
5023 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
5024 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
5025 Note that this disallows MEM(REG+REG), but allows
5026 MEM(PRE/POST_INC/DEC(REG)). */
5028 soft_df_operand (rtx op, enum machine_mode mode)
5030 if (s_register_operand (op, mode))
5031 return TRUE;
5033 if (mode != VOIDmode && GET_MODE (op) != mode)
5034 return FALSE;
5036 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
5037 return FALSE;
5039 if (GET_CODE (op) == SUBREG)
5040 op = SUBREG_REG (op);
5042 switch (GET_CODE (op))
5044 case CONST_DOUBLE:
5045 return TRUE;
5047 case MEM:
5048 return memory_address_p (DFmode, XEXP (op, 0));
5050 default:
5051 return FALSE;
5055 /* Like soft_df_operand, but don't accept constants. */
5057 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
5059 if (s_register_operand (op, mode))
5060 return TRUE;
5062 if (mode != VOIDmode && GET_MODE (op) != mode)
5063 return FALSE;
5065 if (GET_CODE (op) == SUBREG)
5066 op = SUBREG_REG (op);
5068 if (GET_CODE (op) == MEM)
5069 return memory_address_p (DFmode, XEXP (op, 0));
5070 return FALSE;
5073 /* Return TRUE for valid index operands. */
5075 index_operand (rtx op, enum machine_mode mode)
5077 return (s_register_operand (op, mode)
5078 || (immediate_operand (op, mode)
5079 && (GET_CODE (op) != CONST_INT
5080 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
5083 /* Return TRUE for valid shifts by a constant. This also accepts any
5084 power of two on the (somewhat overly relaxed) assumption that the
5085 shift operator in this case was a mult. */
5087 const_shift_operand (rtx op, enum machine_mode mode)
5089 return (power_of_two_operand (op, mode)
5090 || (immediate_operand (op, mode)
5091 && (GET_CODE (op) != CONST_INT
5092 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
5095 /* Return TRUE for arithmetic operators which can be combined with a multiply
5096 (shift). */
5098 shiftable_operator (rtx x, enum machine_mode mode)
5100 enum rtx_code code;
5102 if (GET_MODE (x) != mode)
5103 return FALSE;
5105 code = GET_CODE (x);
5107 return (code == PLUS || code == MINUS
5108 || code == IOR || code == XOR || code == AND);
5111 /* Return TRUE for binary logical operators. */
5113 logical_binary_operator (rtx x, enum machine_mode mode)
5115 enum rtx_code code;
5117 if (GET_MODE (x) != mode)
5118 return FALSE;
5120 code = GET_CODE (x);
5122 return (code == IOR || code == XOR || code == AND);
5125 /* Return TRUE for shift operators. */
5127 shift_operator (rtx x,enum machine_mode mode)
5129 enum rtx_code code;
5131 if (GET_MODE (x) != mode)
5132 return FALSE;
5134 code = GET_CODE (x);
5136 if (code == MULT)
5137 return power_of_two_operand (XEXP (x, 1), mode);
5139 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
5140 || code == ROTATERT);
5143 /* Return TRUE if x is EQ or NE. */
5145 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
5147 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
5150 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
5152 arm_comparison_operator (rtx x, enum machine_mode mode)
5154 return (comparison_operator (x, mode)
5155 && GET_CODE (x) != LTGT
5156 && GET_CODE (x) != UNEQ);
5159 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
5161 minmax_operator (rtx x, enum machine_mode mode)
5163 enum rtx_code code = GET_CODE (x);
5165 if (GET_MODE (x) != mode)
5166 return FALSE;
5168 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
5171 /* Return TRUE if this is the condition code register; if we aren't given
5172 a mode, accept any class CCmode register. */
5174 cc_register (rtx x, enum machine_mode mode)
5176 if (mode == VOIDmode)
5178 mode = GET_MODE (x);
5180 if (GET_MODE_CLASS (mode) != MODE_CC)
5181 return FALSE;
5184 if ( GET_MODE (x) == mode
5185 && GET_CODE (x) == REG
5186 && REGNO (x) == CC_REGNUM)
5187 return TRUE;
5189 return FALSE;
5192 /* Return TRUE if this is the condition code register; if we aren't given
5193 a mode, accept any class CCmode register which indicates a dominance
5194 expression. */
5196 dominant_cc_register (rtx x, enum machine_mode mode)
5198 if (mode == VOIDmode)
5200 mode = GET_MODE (x);
5202 if (GET_MODE_CLASS (mode) != MODE_CC)
5203 return FALSE;
5206 if (mode != CC_DNEmode && mode != CC_DEQmode
5207 && mode != CC_DLEmode && mode != CC_DLTmode
5208 && mode != CC_DGEmode && mode != CC_DGTmode
5209 && mode != CC_DLEUmode && mode != CC_DLTUmode
5210 && mode != CC_DGEUmode && mode != CC_DGTUmode)
5211 return FALSE;
5213 return cc_register (x, mode);
5216 /* Return TRUE if X references a SYMBOL_REF. */
5218 symbol_mentioned_p (rtx x)
5220 const char * fmt;
5221 int i;
5223 if (GET_CODE (x) == SYMBOL_REF)
5224 return 1;
5226 fmt = GET_RTX_FORMAT (GET_CODE (x));
5228 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5230 if (fmt[i] == 'E')
5232 int j;
5234 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5235 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5236 return 1;
5238 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5239 return 1;
5242 return 0;
5245 /* Return TRUE if X references a LABEL_REF. */
5247 label_mentioned_p (rtx x)
5249 const char * fmt;
5250 int i;
5252 if (GET_CODE (x) == LABEL_REF)
5253 return 1;
5255 fmt = GET_RTX_FORMAT (GET_CODE (x));
5256 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5258 if (fmt[i] == 'E')
5260 int j;
5262 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5263 if (label_mentioned_p (XVECEXP (x, i, j)))
5264 return 1;
5266 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5267 return 1;
5270 return 0;
5273 enum rtx_code
5274 minmax_code (rtx x)
5276 enum rtx_code code = GET_CODE (x);
5278 if (code == SMAX)
5279 return GE;
5280 else if (code == SMIN)
5281 return LE;
5282 else if (code == UMIN)
5283 return LEU;
5284 else if (code == UMAX)
5285 return GEU;
5287 abort ();
5290 /* Return 1 if memory locations are adjacent. */
5292 adjacent_mem_locations (rtx a, rtx b)
5294 if ((GET_CODE (XEXP (a, 0)) == REG
5295 || (GET_CODE (XEXP (a, 0)) == PLUS
5296 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5297 && (GET_CODE (XEXP (b, 0)) == REG
5298 || (GET_CODE (XEXP (b, 0)) == PLUS
5299 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5301 int val0 = 0, val1 = 0;
5302 int reg0, reg1;
5304 if (GET_CODE (XEXP (a, 0)) == PLUS)
5306 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5307 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5309 else
5310 reg0 = REGNO (XEXP (a, 0));
5312 if (GET_CODE (XEXP (b, 0)) == PLUS)
5314 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5315 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5317 else
5318 reg1 = REGNO (XEXP (b, 0));
5320 /* Don't accept any offset that will require multiple
5321 instructions to handle, since this would cause the
5322 arith_adjacentmem pattern to output an overlong sequence. */
5323 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5324 return 0;
5326 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5328 return 0;
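/* For example (illustrative): (mem (reg r4)) and
   (mem (plus (reg r4) (const_int 4))) are adjacent, in either order,
   since the same base register is used and the offsets differ by exactly
   4; (mem (reg r4)) and (mem (plus (reg r5) (const_int 4))) are not.  */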
5331 /* Return 1 if OP is a load multiple operation. It is known to be
5332 a PARALLEL and the first section will be tested. */
5334 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5336 HOST_WIDE_INT count = XVECLEN (op, 0);
5337 int dest_regno;
5338 rtx src_addr;
5339 HOST_WIDE_INT i = 1, base = 0;
5340 rtx elt;
5342 if (count <= 1
5343 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5344 return 0;
5346 /* Check to see if this might be a write-back. */
5347 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5349 i++;
5350 base = 1;
5352 /* Now check it more carefully. */
5353 if (GET_CODE (SET_DEST (elt)) != REG
5354 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5355 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5356 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5357 return 0;
5360 /* Perform a quick check so we don't blow up below. */
5361 if (count <= i
5362 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5363 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5364 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5365 return 0;
5367 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5368 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5370 for (; i < count; i++)
5372 elt = XVECEXP (op, 0, i);
5374 if (GET_CODE (elt) != SET
5375 || GET_CODE (SET_DEST (elt)) != REG
5376 || GET_MODE (SET_DEST (elt)) != SImode
5377 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5378 || GET_CODE (SET_SRC (elt)) != MEM
5379 || GET_MODE (SET_SRC (elt)) != SImode
5380 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5381 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5382 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5383 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5384 return 0;
5387 return 1;
5390 /* Return 1 if OP is a store multiple operation. It is known to be
5391 a PARALLEL and the first section will be tested. */
5393 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5395 HOST_WIDE_INT count = XVECLEN (op, 0);
5396 int src_regno;
5397 rtx dest_addr;
5398 HOST_WIDE_INT i = 1, base = 0;
5399 rtx elt;
5401 if (count <= 1
5402 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5403 return 0;
5405 /* Check to see if this might be a write-back. */
5406 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5408 i++;
5409 base = 1;
5411 /* Now check it more carefully. */
5412 if (GET_CODE (SET_DEST (elt)) != REG
5413 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5414 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5415 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5416 return 0;
5419 /* Perform a quick check so we don't blow up below. */
5420 if (count <= i
5421 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5422 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5423 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5424 return 0;
5426 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5427 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5429 for (; i < count; i++)
5431 elt = XVECEXP (op, 0, i);
5433 if (GET_CODE (elt) != SET
5434 || GET_CODE (SET_SRC (elt)) != REG
5435 || GET_MODE (SET_SRC (elt)) != SImode
5436 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5437 || GET_CODE (SET_DEST (elt)) != MEM
5438 || GET_MODE (SET_DEST (elt)) != SImode
5439 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5440 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5441 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5442 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5443 return 0;
5446 return 1;
5450 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5451 HOST_WIDE_INT *load_offset)
5453 int unsorted_regs[4];
5454 HOST_WIDE_INT unsorted_offsets[4];
5455 int order[4];
5456 int base_reg = -1;
5457 int i;
5459 /* Can only handle 2, 3, or 4 insns at present,
5460 though could be easily extended if required. */
5461 if (nops < 2 || nops > 4)
5462 abort ();
5464 /* Loop over the operands and check that the memory references are
5465 suitable (i.e. immediate offsets from the same base register). At
5466 the same time, extract the target register, and the memory
5467 offsets. */
5468 for (i = 0; i < nops; i++)
5470 rtx reg;
5471 rtx offset;
5473 /* Convert a subreg of a mem into the mem itself. */
5474 if (GET_CODE (operands[nops + i]) == SUBREG)
5475 operands[nops + i] = alter_subreg (operands + (nops + i));
5477 if (GET_CODE (operands[nops + i]) != MEM)
5478 abort ();
5480 /* Don't reorder volatile memory references; it doesn't seem worth
5481 looking for the case where the order is ok anyway. */
5482 if (MEM_VOLATILE_P (operands[nops + i]))
5483 return 0;
5485 offset = const0_rtx;
5487 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5488 || (GET_CODE (reg) == SUBREG
5489 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5490 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5491 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5492 == REG)
5493 || (GET_CODE (reg) == SUBREG
5494 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5495 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5496 == CONST_INT)))
5498 if (i == 0)
5500 base_reg = REGNO (reg);
5501 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5502 ? REGNO (operands[i])
5503 : REGNO (SUBREG_REG (operands[i])));
5504 order[0] = 0;
5506 else
5508 if (base_reg != (int) REGNO (reg))
5509 /* Not addressed from the same base register. */
5510 return 0;
5512 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5513 ? REGNO (operands[i])
5514 : REGNO (SUBREG_REG (operands[i])));
5515 if (unsorted_regs[i] < unsorted_regs[order[0]])
5516 order[0] = i;
5519 /* If it isn't an integer register, or if it overwrites the
5520 base register but isn't the last insn in the list, then
5521 we can't do this. */
5522 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5523 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5524 return 0;
5526 unsorted_offsets[i] = INTVAL (offset);
5528 else
5529 /* Not a suitable memory address. */
5530 return 0;
5533 /* All the useful information has now been extracted from the
5534 operands into unsorted_regs and unsorted_offsets; additionally,
5535 order[0] has been set to the lowest numbered register in the
5536 list. Sort the registers into order, and check that the memory
5537 offsets are ascending and adjacent. */
5539 for (i = 1; i < nops; i++)
5541 int j;
5543 order[i] = order[i - 1];
5544 for (j = 0; j < nops; j++)
5545 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5546 && (order[i] == order[i - 1]
5547 || unsorted_regs[j] < unsorted_regs[order[i]]))
5548 order[i] = j;
5550 /* Have we found a suitable register? If not, one must be used more
5551 than once. */
5552 if (order[i] == order[i - 1])
5553 return 0;
5555 /* Is the memory address adjacent and ascending? */
5556 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5557 return 0;
5560 if (base)
5562 *base = base_reg;
5564 for (i = 0; i < nops; i++)
5565 regs[i] = unsorted_regs[order[i]];
5567 *load_offset = unsorted_offsets[order[0]];
5570 if (unsorted_offsets[order[0]] == 0)
5571 return 1; /* ldmia */
5573 if (unsorted_offsets[order[0]] == 4)
5574 return 2; /* ldmib */
5576 if (unsorted_offsets[order[nops - 1]] == 0)
5577 return 3; /* ldmda */
5579 if (unsorted_offsets[order[nops - 1]] == -4)
5580 return 4; /* ldmdb */
5582 /* For the ARM8, ARM9 and StrongARM, 2 ldr instructions are faster than an ldm
5583 if the offset isn't small enough. The reason 2 ldrs are faster
5584 is because these ARMs are able to do more than one cache access
5585 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5586 whilst the ARM8 has a double bandwidth cache. This means that
5587 these cores can do both an instruction fetch and a data fetch in
5588 a single cycle, so the trick of calculating the address into a
5589 scratch register (one of the result regs) and then doing a load
5590 multiple actually becomes slower (and no smaller in code size).
5591 That is the transformation
5593 ldr rd1, [rbase + offset]
5594 ldr rd2, [rbase + offset + 4]
5598 add rd1, rbase, offset
5599 ldmia rd1, {rd1, rd2}
5601 produces worse code -- '3 cycles + any stalls on rd2' instead of
5602 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5603 access per cycle, the first sequence could never complete in less
5604 than 6 cycles, whereas the ldm sequence would only take 5 and
5605 would make better use of sequential accesses if not hitting the
5606 cache.
5608 We cheat here and test 'arm_ld_sched' which we currently know to
5609 only be true for the ARM8, ARM9 and StrongARM. If this ever
5610 changes, then the test below needs to be reworked. */
5611 if (nops == 2 && arm_ld_sched)
5612 return 0;
5614 /* Can't do it without setting up the offset; only do this if it takes
5615 no more than one insn. */
5616 return (const_ok_for_arm (unsorted_offsets[order[0]])
5617 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
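/* An illustrative mapping of the return codes above, for two loads into
   r4 and r5 from base register r6:

     offsets  0 and  4  -> 1  ldmia r6, {r4, r5}
     offsets  4 and  8  -> 2  ldmib
     offsets -4 and  0  -> 3  ldmda
     offsets -8 and -4  -> 4  ldmdb

   Other starting offsets return 5 when one add/sub can set up the base
   (see emit_ldm_seq below), except that two loads on an ld_sched core
   fall back to 0, i.e. separate ldr instructions, per the comment
   above.  */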
5620 const char *
5621 emit_ldm_seq (rtx *operands, int nops)
5623 int regs[4];
5624 int base_reg;
5625 HOST_WIDE_INT offset;
5626 char buf[100];
5627 int i;
5629 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5631 case 1:
5632 strcpy (buf, "ldm%?ia\t");
5633 break;
5635 case 2:
5636 strcpy (buf, "ldm%?ib\t");
5637 break;
5639 case 3:
5640 strcpy (buf, "ldm%?da\t");
5641 break;
5643 case 4:
5644 strcpy (buf, "ldm%?db\t");
5645 break;
5647 case 5:
5648 if (offset >= 0)
5649 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5650 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5651 (long) offset);
5652 else
5653 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5654 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5655 (long) -offset);
5656 output_asm_insn (buf, operands);
5657 base_reg = regs[0];
5658 strcpy (buf, "ldm%?ia\t");
5659 break;
5661 default:
5662 abort ();
5665 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5666 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5668 for (i = 1; i < nops; i++)
5669 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5670 reg_names[regs[i]]);
5672 strcat (buf, "}\t%@ phole ldm");
5674 output_asm_insn (buf, operands);
5675 return "";
5679 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5680 HOST_WIDE_INT * load_offset)
5682 int unsorted_regs[4];
5683 HOST_WIDE_INT unsorted_offsets[4];
5684 int order[4];
5685 int base_reg = -1;
5686 int i;
5688 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5689 extended if required. */
5690 if (nops < 2 || nops > 4)
5691 abort ();
5693 /* Loop over the operands and check that the memory references are
5694 suitable (i.e. immediate offsets from the same base register). At
5695 the same time, extract the target register, and the memory
5696 offsets. */
5697 for (i = 0; i < nops; i++)
5699 rtx reg;
5700 rtx offset;
5702 /* Convert a subreg of a mem into the mem itself. */
5703 if (GET_CODE (operands[nops + i]) == SUBREG)
5704 operands[nops + i] = alter_subreg (operands + (nops + i));
5706 if (GET_CODE (operands[nops + i]) != MEM)
5707 abort ();
5709 /* Don't reorder volatile memory references; it doesn't seem worth
5710 looking for the case where the order is ok anyway. */
5711 if (MEM_VOLATILE_P (operands[nops + i]))
5712 return 0;
5714 offset = const0_rtx;
5716 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5717 || (GET_CODE (reg) == SUBREG
5718 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5719 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5720 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5721 == REG)
5722 || (GET_CODE (reg) == SUBREG
5723 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5724 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5725 == CONST_INT)))
5727 if (i == 0)
5729 base_reg = REGNO (reg);
5730 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5731 ? REGNO (operands[i])
5732 : REGNO (SUBREG_REG (operands[i])));
5733 order[0] = 0;
5735 else
5737 if (base_reg != (int) REGNO (reg))
5738 /* Not addressed from the same base register. */
5739 return 0;
5741 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5742 ? REGNO (operands[i])
5743 : REGNO (SUBREG_REG (operands[i])));
5744 if (unsorted_regs[i] < unsorted_regs[order[0]])
5745 order[0] = i;
5748 /* If it isn't an integer register, then we can't do this. */
5749 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5750 return 0;
5752 unsorted_offsets[i] = INTVAL (offset);
5754 else
5755 /* Not a suitable memory address. */
5756 return 0;
5759 /* All the useful information has now been extracted from the
5760 operands into unsorted_regs and unsorted_offsets; additionally,
5761 order[0] has been set to the lowest numbered register in the
5762 list. Sort the registers into order, and check that the memory
5763 offsets are ascending and adjacent. */
5765 for (i = 1; i < nops; i++)
5767 int j;
5769 order[i] = order[i - 1];
5770 for (j = 0; j < nops; j++)
5771 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5772 && (order[i] == order[i - 1]
5773 || unsorted_regs[j] < unsorted_regs[order[i]]))
5774 order[i] = j;
5776 /* Have we found a suitable register? If not, one must be used more
5777 than once. */
5778 if (order[i] == order[i - 1])
5779 return 0;
5781 /* Is the memory address adjacent and ascending? */
5782 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5783 return 0;
5786 if (base)
5788 *base = base_reg;
5790 for (i = 0; i < nops; i++)
5791 regs[i] = unsorted_regs[order[i]];
5793 *load_offset = unsorted_offsets[order[0]];
5796 if (unsorted_offsets[order[0]] == 0)
5797 return 1; /* stmia */
5799 if (unsorted_offsets[order[0]] == 4)
5800 return 2; /* stmib */
5802 if (unsorted_offsets[order[nops - 1]] == 0)
5803 return 3; /* stmda */
5805 if (unsorted_offsets[order[nops - 1]] == -4)
5806 return 4; /* stmdb */
5808 return 0;
5811 const char *
5812 emit_stm_seq (rtx *operands, int nops)
5814 int regs[4];
5815 int base_reg;
5816 HOST_WIDE_INT offset;
5817 char buf[100];
5818 int i;
5820 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5822 case 1:
5823 strcpy (buf, "stm%?ia\t");
5824 break;
5826 case 2:
5827 strcpy (buf, "stm%?ib\t");
5828 break;
5830 case 3:
5831 strcpy (buf, "stm%?da\t");
5832 break;
5834 case 4:
5835 strcpy (buf, "stm%?db\t");
5836 break;
5838 default:
5839 abort ();
5842 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5843 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5845 for (i = 1; i < nops; i++)
5846 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5847 reg_names[regs[i]]);
5849 strcat (buf, "}\t%@ phole stm");
5851 output_asm_insn (buf, operands);
5852 return "";
5856 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5858 if (GET_CODE (op) != PARALLEL
5859 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5860 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5861 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5862 return 0;
5864 return 1;
5867 /* Routines for use in generating RTL. */
5870 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5871 int write_back, int unchanging_p, int in_struct_p,
5872 int scalar_p)
5874 int i = 0, j;
5875 rtx result;
5876 int sign = up ? 1 : -1;
5877 rtx mem;
5879 /* XScale has load-store double instructions, but they have stricter
5880 alignment requirements than load-store multiple, so we cannot
5881 use them.
5883 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5884 the pipeline until completion.
5886 NREGS CYCLES
5887 1 3
5888 2 4
5889 3 5
5890 4 6
5892 An ldr instruction takes 1-3 cycles, but does not block the
5893 pipeline.
5895 NREGS CYCLES
5896 1 1-3
5897 2 2-6
5898 3 3-9
5899 4 4-12
5901 Best case ldr will always win. However, the more ldr instructions
5902 we issue, the less likely we are to be able to schedule them well.
5903 Using ldr instructions also increases code size.
5905 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5906 for counts of 3 or 4 regs. */
5907 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5909 rtx seq;
5911 start_sequence ();
5913 for (i = 0; i < count; i++)
5915 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5916 RTX_UNCHANGING_P (mem) = unchanging_p;
5917 MEM_IN_STRUCT_P (mem) = in_struct_p;
5918 MEM_SCALAR_P (mem) = scalar_p;
5919 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5922 if (write_back)
5923 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5925 seq = get_insns ();
5926 end_sequence ();
5928 return seq;
5931 result = gen_rtx_PARALLEL (VOIDmode,
5932 rtvec_alloc (count + (write_back ? 1 : 0)));
5933 if (write_back)
5935 XVECEXP (result, 0, 0)
5936 = gen_rtx_SET (GET_MODE (from), from,
5937 plus_constant (from, count * 4 * sign));
5938 i = 1;
5939 count++;
5942 for (j = 0; i < count; i++, j++)
5944 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5945 RTX_UNCHANGING_P (mem) = unchanging_p;
5946 MEM_IN_STRUCT_P (mem) = in_struct_p;
5947 MEM_SCALAR_P (mem) = scalar_p;
5948 XVECEXP (result, 0, i)
5949 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5952 return result;
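/* A sketch (ours, not from the original source) of the PARALLEL built
   above for base_regno == 4, count == 2, up == 1, write_back != 0:

     (parallel [(set FROM (plus FROM (const_int 8)))
                (set (reg:SI 4) (mem:SI FROM))
                (set (reg:SI 5) (mem:SI (plus FROM (const_int 4))))])

   On XScale, when not optimizing for size, the count <= 2 path instead
   returns an insn sequence of separate SImode loads, matching the cycle
   counts quoted above.  */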
5956 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5957 int write_back, int unchanging_p, int in_struct_p,
5958 int scalar_p)
5960 int i = 0, j;
5961 rtx result;
5962 int sign = up ? 1 : -1;
5963 rtx mem;
5965 /* See arm_gen_load_multiple for discussion of
5966 the pros/cons of ldm/stm usage for XScale. */
5967 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5969 rtx seq;
5971 start_sequence ();
5973 for (i = 0; i < count; i++)
5975 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5976 RTX_UNCHANGING_P (mem) = unchanging_p;
5977 MEM_IN_STRUCT_P (mem) = in_struct_p;
5978 MEM_SCALAR_P (mem) = scalar_p;
5979 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5982 if (write_back)
5983 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5985 seq = get_insns ();
5986 end_sequence ();
5988 return seq;
5991 result = gen_rtx_PARALLEL (VOIDmode,
5992 rtvec_alloc (count + (write_back ? 1 : 0)));
5993 if (write_back)
5995 XVECEXP (result, 0, 0)
5996 = gen_rtx_SET (GET_MODE (to), to,
5997 plus_constant (to, count * 4 * sign));
5998 i = 1;
5999 count++;
6002 for (j = 0; i < count; i++, j++)
6004 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
6005 RTX_UNCHANGING_P (mem) = unchanging_p;
6006 MEM_IN_STRUCT_P (mem) = in_struct_p;
6007 MEM_SCALAR_P (mem) = scalar_p;
6009 XVECEXP (result, 0, i)
6010 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6013 return result;
6017 arm_gen_movmemqi (rtx *operands)
6019 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6020 int i;
6021 rtx src, dst;
6022 rtx st_src, st_dst, fin_src, fin_dst;
6023 rtx part_bytes_reg = NULL;
6024 rtx mem;
6025 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
6026 int dst_scalar_p, src_scalar_p;
6028 if (GET_CODE (operands[2]) != CONST_INT
6029 || GET_CODE (operands[3]) != CONST_INT
6030 || INTVAL (operands[2]) > 64
6031 || INTVAL (operands[3]) & 3)
6032 return 0;
6034 st_dst = XEXP (operands[0], 0);
6035 st_src = XEXP (operands[1], 0);
6037 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
6038 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
6039 dst_scalar_p = MEM_SCALAR_P (operands[0]);
6040 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
6041 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
6042 src_scalar_p = MEM_SCALAR_P (operands[1]);
6044 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
6045 fin_src = src = copy_to_mode_reg (SImode, st_src);
6047 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6048 out_words_to_go = INTVAL (operands[2]) / 4;
6049 last_bytes = INTVAL (operands[2]) & 3;
6051 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6052 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6054 for (i = 0; in_words_to_go >= 2; i+=4)
6056 if (in_words_to_go > 4)
6057 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6058 src_unchanging_p,
6059 src_in_struct_p,
6060 src_scalar_p));
6061 else
6062 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6063 FALSE, src_unchanging_p,
6064 src_in_struct_p, src_scalar_p));
6066 if (out_words_to_go)
6068 if (out_words_to_go > 4)
6069 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6070 dst_unchanging_p,
6071 dst_in_struct_p,
6072 dst_scalar_p));
6073 else if (out_words_to_go != 1)
6074 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6075 dst, TRUE,
6076 (last_bytes == 0
6077 ? FALSE : TRUE),
6078 dst_unchanging_p,
6079 dst_in_struct_p,
6080 dst_scalar_p));
6081 else
6083 mem = gen_rtx_MEM (SImode, dst);
6084 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6085 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6086 MEM_SCALAR_P (mem) = dst_scalar_p;
6087 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6088 if (last_bytes != 0)
6089 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6093 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6094 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6097 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6098 if (out_words_to_go)
6100 rtx sreg;
6102 mem = gen_rtx_MEM (SImode, src);
6103 RTX_UNCHANGING_P (mem) = src_unchanging_p;
6104 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6105 MEM_SCALAR_P (mem) = src_scalar_p;
6106 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
6107 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
6109 mem = gen_rtx_MEM (SImode, dst);
6110 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6111 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6112 MEM_SCALAR_P (mem) = dst_scalar_p;
6113 emit_move_insn (mem, sreg);
6114 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
6115 in_words_to_go--;
6117 if (in_words_to_go) /* Sanity check */
6118 abort ();
6121 if (in_words_to_go)
6123 if (in_words_to_go < 0)
6124 abort ();
6126 mem = gen_rtx_MEM (SImode, src);
6127 RTX_UNCHANGING_P (mem) = src_unchanging_p;
6128 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6129 MEM_SCALAR_P (mem) = src_scalar_p;
6130 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6133 if (last_bytes && part_bytes_reg == NULL)
6134 abort ();
6136 if (BYTES_BIG_ENDIAN && last_bytes)
6138 rtx tmp = gen_reg_rtx (SImode);
6140 /* The bytes we want are in the top end of the word. */
6141 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6142 GEN_INT (8 * (4 - last_bytes))));
6143 part_bytes_reg = tmp;
6145 while (last_bytes)
6147 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
6148 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6149 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6150 MEM_SCALAR_P (mem) = dst_scalar_p;
6151 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6153 if (--last_bytes)
6155 tmp = gen_reg_rtx (SImode);
6156 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6157 part_bytes_reg = tmp;
6162 else
6164 if (last_bytes > 1)
6166 mem = gen_rtx_MEM (HImode, dst);
6167 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6168 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6169 MEM_SCALAR_P (mem) = dst_scalar_p;
6170 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6171 last_bytes -= 2;
6172 if (last_bytes)
6174 rtx tmp = gen_reg_rtx (SImode);
6176 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6177 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6178 part_bytes_reg = tmp;
6182 if (last_bytes)
6184 mem = gen_rtx_MEM (QImode, dst);
6185 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6186 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6187 MEM_SCALAR_P (mem) = dst_scalar_p;
6188 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6192 return 1;
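/* Worked example (added for illustration): copying 14 bytes with
   word-aligned operands gives in_words_to_go = 4, out_words_to_go = 3
   and last_bytes = 2.  On a little-endian target the code above then
   emits one 4-register load multiple (r0-r3), one 3-register store
   multiple (r0-r2) with write-back, and finally a half-word store of
   the low end of r3, the part_bytes_reg; a big-endian target first
   shifts the live bytes down from the top of the word.  */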
6195 /* Generate a memory reference for a half word, such that it will be loaded
6196 into the top 16 bits of the word. We can assume that the address is
6197 known to be alignable and of the form reg, or plus (reg, const). */
6200 arm_gen_rotated_half_load (rtx memref)
6202 HOST_WIDE_INT offset = 0;
6203 rtx base = XEXP (memref, 0);
6205 if (GET_CODE (base) == PLUS)
6207 offset = INTVAL (XEXP (base, 1));
6208 base = XEXP (base, 0);
6211 /* If we aren't allowed to generate unaligned addresses, then fail. */
6212 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6213 return NULL;
6215 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6217 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6218 return base;
6220 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6223 /* Select a dominance comparison mode if possible for a test of the general
6224 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6225 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6226 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6227 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6228 In all cases OP will be either EQ or NE, but we don't need to know which
6229 here. If we are unable to support a dominance comparison we return
6230 CCmode. This will then fail to match for the RTL expressions that
6231 generate this call. */
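/* Added example: if X is (lt r0 r1), Y is (le r2 r3) and COND_OR is
   DOM_CC_X_OR_Y, then LT dominates LE (LT true implies LE true), so
   the switch below yields CC_DLEmode.  For unrelated conditions such
   as LT and GEU neither dominates the other and CCmode is returned.  */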
6232 enum machine_mode
6233 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6235 enum rtx_code cond1, cond2;
6236 int swapped = 0;
6238 /* Currently we will probably get the wrong result if the individual
6239 comparisons are not simple. This also ensures that it is safe to
6240 reverse a comparison if necessary. */
6241 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6242 != CCmode)
6243 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6244 != CCmode))
6245 return CCmode;
6247 /* The if_then_else variant of this tests the second condition if the
6248 first passes, but is true if the first fails. Reverse the first
6249 condition to get a true "inclusive-or" expression. */
6250 if (cond_or == DOM_CC_NX_OR_Y)
6251 cond1 = reverse_condition (cond1);
6253 /* If the comparisons are not equal, and one doesn't dominate the other,
6254 then we can't do this. */
6255 if (cond1 != cond2
6256 && !comparison_dominates_p (cond1, cond2)
6257 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6258 return CCmode;
6260 if (swapped)
6262 enum rtx_code temp = cond1;
6263 cond1 = cond2;
6264 cond2 = temp;
6267 switch (cond1)
6269 case EQ:
6270 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6271 return CC_DEQmode;
6273 switch (cond2)
6275 case LE: return CC_DLEmode;
6276 case LEU: return CC_DLEUmode;
6277 case GE: return CC_DGEmode;
6278 case GEU: return CC_DGEUmode;
6279 default: break;
6282 break;
6284 case LT:
6285 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6286 return CC_DLTmode;
6287 if (cond2 == LE)
6288 return CC_DLEmode;
6289 if (cond2 == NE)
6290 return CC_DNEmode;
6291 break;
6293 case GT:
6294 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6295 return CC_DGTmode;
6296 if (cond2 == GE)
6297 return CC_DGEmode;
6298 if (cond2 == NE)
6299 return CC_DNEmode;
6300 break;
6302 case LTU:
6303 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6304 return CC_DLTUmode;
6305 if (cond2 == LEU)
6306 return CC_DLEUmode;
6307 if (cond2 == NE)
6308 return CC_DNEmode;
6309 break;
6311 case GTU:
6312 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6313 return CC_DGTUmode;
6314 if (cond2 == GEU)
6315 return CC_DGEUmode;
6316 if (cond2 == NE)
6317 return CC_DNEmode;
6318 break;
6320 /* The remaining cases only occur when both comparisons are the
6321 same. */
6322 case NE:
6323 return CC_DNEmode;
6325 case LE:
6326 return CC_DLEmode;
6328 case GE:
6329 return CC_DGEmode;
6331 case LEU:
6332 return CC_DLEUmode;
6334 case GEU:
6335 return CC_DGEUmode;
6337 default:
6338 break;
6341 abort ();
6344 enum machine_mode
6345 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6347 /* All floating point compares return CCFP if it is an equality
6348 comparison, and CCFPE otherwise. */
6349 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6351 switch (op)
6353 case EQ:
6354 case NE:
6355 case UNORDERED:
6356 case ORDERED:
6357 case UNLT:
6358 case UNLE:
6359 case UNGT:
6360 case UNGE:
6361 case UNEQ:
6362 case LTGT:
6363 return CCFPmode;
6365 case LT:
6366 case LE:
6367 case GT:
6368 case GE:
6369 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6370 return CCFPmode;
6371 return CCFPEmode;
6373 default:
6374 abort ();
6378 /* A compare with a shifted operand. Because of canonicalization, the
6379 comparison will have to be swapped when we emit the assembler. */
6380 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6381 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6382 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6383 || GET_CODE (x) == ROTATERT))
6384 return CC_SWPmode;
6386 /* This is a special case that is used by combine to allow a
6387 comparison of a shifted byte load to be split into a zero-extend
6388 followed by a comparison of the shifted integer (only valid for
6389 equalities and unsigned inequalities). */
6390 if (GET_MODE (x) == SImode
6391 && GET_CODE (x) == ASHIFT
6392 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6393 && GET_CODE (XEXP (x, 0)) == SUBREG
6394 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6395 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6396 && (op == EQ || op == NE
6397 || op == GEU || op == GTU || op == LTU || op == LEU)
6398 && GET_CODE (y) == CONST_INT)
6399 return CC_Zmode;
6401 /* A construct for a conditional compare. If the false arm contains
6402 0, then both conditions must be true; otherwise either condition
6403 must be true. Not all conditions are possible, so CCmode is
6404 returned if it can't be done. */
6405 if (GET_CODE (x) == IF_THEN_ELSE
6406 && (XEXP (x, 2) == const0_rtx
6407 || XEXP (x, 2) == const1_rtx)
6408 && COMPARISON_P (XEXP (x, 0))
6409 && COMPARISON_P (XEXP (x, 1)))
6410 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6411 INTVAL (XEXP (x, 2)));
6413 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6414 if (GET_CODE (x) == AND
6415 && COMPARISON_P (XEXP (x, 0))
6416 && COMPARISON_P (XEXP (x, 1)))
6417 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6418 DOM_CC_X_AND_Y);
6420 if (GET_CODE (x) == IOR
6421 && COMPARISON_P (XEXP (x, 0))
6422 && COMPARISON_P (XEXP (x, 1)))
6423 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6424 DOM_CC_X_OR_Y);
6426 /* An operation (on Thumb) where we want to test for a single bit.
6427 This is done by shifting that bit up into the top bit of a
6428 scratch register; we can then branch on the sign bit. */
6429 if (TARGET_THUMB
6430 && GET_MODE (x) == SImode
6431 && (op == EQ || op == NE)
6432 && (GET_CODE (x) == ZERO_EXTRACT))
6433 return CC_Nmode;
6435 /* An operation that sets the condition codes as a side-effect, the
6436 V flag is not set correctly, so we can only use comparisons where
6437 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6438 instead.) */
6439 if (GET_MODE (x) == SImode
6440 && y == const0_rtx
6441 && (op == EQ || op == NE || op == LT || op == GE)
6442 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6443 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6444 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6445 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6446 || GET_CODE (x) == LSHIFTRT
6447 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6448 || GET_CODE (x) == ROTATERT
6449 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6450 return CC_NOOVmode;
6452 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6453 return CC_Zmode;
6455 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6456 && GET_CODE (x) == PLUS
6457 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6458 return CC_Cmode;
6460 return CCmode;
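/* Two concrete cases (added illustration): an EQ test of
   (plus:SI r0 r1) against const0_rtx matches the side-effect clause
   above and yields CC_NOOVmode, while (ltu (plus:SI r0 r1) r0) -- the
   carry-out test of an addition -- yields CC_Cmode.  Everything else
   falls through to the general CCmode.  */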
6463 /* X and Y are two things to compare using CODE. Emit the compare insn
6464 and return the rtx for the CC register in the proper mode. */
6467 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6469 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6470 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6472 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6473 gen_rtx_COMPARE (mode, x, y)));
6475 return cc_reg;
6478 /* Generate a sequence of insns that will generate the correct return
6479 address mask depending on the physical architecture that the program
6480 is running on. */
6482 arm_gen_return_addr_mask (void)
6484 rtx reg = gen_reg_rtx (Pmode);
6486 emit_insn (gen_return_addr_mask (reg));
6487 return reg;
6490 void
6491 arm_reload_in_hi (rtx *operands)
6493 rtx ref = operands[1];
6494 rtx base, scratch;
6495 HOST_WIDE_INT offset = 0;
6497 if (GET_CODE (ref) == SUBREG)
6499 offset = SUBREG_BYTE (ref);
6500 ref = SUBREG_REG (ref);
6503 if (GET_CODE (ref) == REG)
6505 /* We have a pseudo which has been spilt onto the stack; there
6506 are two cases here: the first where there is a simple
6507 stack-slot replacement and a second where the stack-slot is
6508 out of range, or is used as a subreg. */
6509 if (reg_equiv_mem[REGNO (ref)])
6511 ref = reg_equiv_mem[REGNO (ref)];
6512 base = find_replacement (&XEXP (ref, 0));
6514 else
6515 /* The slot is out of range, or was dressed up in a SUBREG. */
6516 base = reg_equiv_address[REGNO (ref)];
6518 else
6519 base = find_replacement (&XEXP (ref, 0));
6521 /* Handle the case where the address is too complex to be offset by 1. */
6522 if (GET_CODE (base) == MINUS
6523 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6525 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6527 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6528 base = base_plus;
6530 else if (GET_CODE (base) == PLUS)
6532 /* The addend must be CONST_INT, or we would have dealt with it above. */
6533 HOST_WIDE_INT hi, lo;
6535 offset += INTVAL (XEXP (base, 1));
6536 base = XEXP (base, 0);
6538 /* Rework the address into a legal sequence of insns. */
6539 /* Valid range for lo is -4095 -> 4095 */
6540 lo = (offset >= 0
6541 ? (offset & 0xfff)
6542 : -((-offset) & 0xfff));
6544 /* Corner case, if lo is the max offset then we would be out of range
6545 once we have added the additional 1 below, so bump the msb into the
6546 pre-loading insn(s). */
6547 if (lo == 4095)
6548 lo &= 0x7ff;
6550 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6551 ^ (HOST_WIDE_INT) 0x80000000)
6552 - (HOST_WIDE_INT) 0x80000000);
6554 if (hi + lo != offset)
6555 abort ();
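/* Worked example (added for illustration): offset 0x1234 splits into
   lo = 0x234 and hi = 0x1000, so hi + lo == offset as checked above.
   In the corner case offset == 4095, lo is clipped to 0x7ff and hi
   becomes 0x800, keeping lo + 1 within the 4095-byte ldr range.  The
   split also handles negative offsets, e.g. -4100 gives lo = -4 and
   hi = -4096.  */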
6557 if (hi != 0)
6559 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6561 /* Get the base address; addsi3 knows how to handle constants
6562 that require more than one insn. */
6563 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6564 base = base_plus;
6565 offset = lo;
6569 /* Operands[2] may overlap operands[0] (though it won't overlap
6570 operands[1]), that's why we asked for a DImode reg -- so we can
6571 use the half that does not overlap. */
6572 if (REGNO (operands[2]) == REGNO (operands[0]))
6573 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6574 else
6575 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6577 emit_insn (gen_zero_extendqisi2 (scratch,
6578 gen_rtx_MEM (QImode,
6579 plus_constant (base,
6580 offset))));
6581 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6582 gen_rtx_MEM (QImode,
6583 plus_constant (base,
6584 offset + 1))));
6585 if (!BYTES_BIG_ENDIAN)
6586 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6587 gen_rtx_IOR (SImode,
6588 gen_rtx_ASHIFT
6589 (SImode,
6590 gen_rtx_SUBREG (SImode, operands[0], 0),
6591 GEN_INT (8)),
6592 scratch)));
6593 else
6594 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6595 gen_rtx_IOR (SImode,
6596 gen_rtx_ASHIFT (SImode, scratch,
6597 GEN_INT (8)),
6598 gen_rtx_SUBREG (SImode, operands[0],
6599 0))));
6602 /* Handle storing a half-word to memory during reload by synthesizing as two
6603 byte stores. Take care not to clobber the input values until after we
6604 have moved them somewhere safe. This code assumes that if the DImode
6605 scratch in operands[2] overlaps either the input value or output address
6606 in some way, then that value must die in this insn (we absolutely need
6607 two scratch registers for some corner cases). */
6608 void
6609 arm_reload_out_hi (rtx *operands)
6611 rtx ref = operands[0];
6612 rtx outval = operands[1];
6613 rtx base, scratch;
6614 HOST_WIDE_INT offset = 0;
6616 if (GET_CODE (ref) == SUBREG)
6618 offset = SUBREG_BYTE (ref);
6619 ref = SUBREG_REG (ref);
6622 if (GET_CODE (ref) == REG)
6624 /* We have a pseudo which has been spilt onto the stack; there
6625 are two cases here: the first where there is a simple
6626 stack-slot replacement and a second where the stack-slot is
6627 out of range, or is used as a subreg. */
6628 if (reg_equiv_mem[REGNO (ref)])
6630 ref = reg_equiv_mem[REGNO (ref)];
6631 base = find_replacement (&XEXP (ref, 0));
6633 else
6634 /* The slot is out of range, or was dressed up in a SUBREG. */
6635 base = reg_equiv_address[REGNO (ref)];
6637 else
6638 base = find_replacement (&XEXP (ref, 0));
6640 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6642 /* Handle the case where the address is too complex to be offset by 1. */
6643 if (GET_CODE (base) == MINUS
6644 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6646 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6648 /* Be careful not to destroy OUTVAL. */
6649 if (reg_overlap_mentioned_p (base_plus, outval))
6651 /* Updating base_plus might destroy outval, see if we can
6652 swap the scratch and base_plus. */
6653 if (!reg_overlap_mentioned_p (scratch, outval))
6655 rtx tmp = scratch;
6656 scratch = base_plus;
6657 base_plus = tmp;
6659 else
6661 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6663 /* Be conservative and copy OUTVAL into the scratch now,
6664 this should only be necessary if outval is a subreg
6665 of something larger than a word. */
6666 /* XXX Might this clobber base? I can't see how it can,
6667 since scratch is known to overlap with OUTVAL, and
6668 must be wider than a word. */
6669 emit_insn (gen_movhi (scratch_hi, outval));
6670 outval = scratch_hi;
6674 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6675 base = base_plus;
6677 else if (GET_CODE (base) == PLUS)
6679 /* The addend must be CONST_INT, or we would have dealt with it above. */
6680 HOST_WIDE_INT hi, lo;
6682 offset += INTVAL (XEXP (base, 1));
6683 base = XEXP (base, 0);
6685 /* Rework the address into a legal sequence of insns. */
6686 /* Valid range for lo is -4095 -> 4095 */
6687 lo = (offset >= 0
6688 ? (offset & 0xfff)
6689 : -((-offset) & 0xfff));
6691 /* Corner case, if lo is the max offset then we would be out of range
6692 once we have added the additional 1 below, so bump the msb into the
6693 pre-loading insn(s). */
6694 if (lo == 4095)
6695 lo &= 0x7ff;
6697 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6698 ^ (HOST_WIDE_INT) 0x80000000)
6699 - (HOST_WIDE_INT) 0x80000000);
6701 if (hi + lo != offset)
6702 abort ();
6704 if (hi != 0)
6706 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6708 /* Be careful not to destroy OUTVAL. */
6709 if (reg_overlap_mentioned_p (base_plus, outval))
6711 /* Updating base_plus might destroy outval, see if we
6712 can swap the scratch and base_plus. */
6713 if (!reg_overlap_mentioned_p (scratch, outval))
6715 rtx tmp = scratch;
6716 scratch = base_plus;
6717 base_plus = tmp;
6719 else
6721 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6723 /* Be conservative and copy outval into scratch now,
6724 this should only be necessary if outval is a
6725 subreg of something larger than a word. */
6726 /* XXX Might this clobber base? I can't see how it
6727 can, since scratch is known to overlap with
6728 outval. */
6729 emit_insn (gen_movhi (scratch_hi, outval));
6730 outval = scratch_hi;
6734 /* Get the base address; addsi3 knows how to handle constants
6735 that require more than one insn. */
6736 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6737 base = base_plus;
6738 offset = lo;
6742 if (BYTES_BIG_ENDIAN)
6744 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6745 plus_constant (base, offset + 1)),
6746 gen_lowpart (QImode, outval)));
6747 emit_insn (gen_lshrsi3 (scratch,
6748 gen_rtx_SUBREG (SImode, outval, 0),
6749 GEN_INT (8)));
6750 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6751 gen_lowpart (QImode, scratch)));
6753 else
6755 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6756 gen_lowpart (QImode, outval)));
6757 emit_insn (gen_lshrsi3 (scratch,
6758 gen_rtx_SUBREG (SImode, outval, 0),
6759 GEN_INT (8)));
6760 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6761 plus_constant (base, offset + 1)),
6762 gen_lowpart (QImode, scratch)));
6766 /* Print a symbolic form of X to the debug file, F. */
6767 static void
6768 arm_print_value (FILE *f, rtx x)
6770 switch (GET_CODE (x))
6772 case CONST_INT:
6773 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6774 return;
6776 case CONST_DOUBLE:
6777 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6778 return;
6780 case CONST_VECTOR:
6782 int i;
6784 fprintf (f, "<");
6785 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6787 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6788 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6789 fputc (',', f);
6791 fprintf (f, ">");
6793 return;
6795 case CONST_STRING:
6796 fprintf (f, "\"%s\"", XSTR (x, 0));
6797 return;
6799 case SYMBOL_REF:
6800 fprintf (f, "`%s'", XSTR (x, 0));
6801 return;
6803 case LABEL_REF:
6804 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6805 return;
6807 case CONST:
6808 arm_print_value (f, XEXP (x, 0));
6809 return;
6811 case PLUS:
6812 arm_print_value (f, XEXP (x, 0));
6813 fprintf (f, "+");
6814 arm_print_value (f, XEXP (x, 1));
6815 return;
6817 case PC:
6818 fprintf (f, "pc");
6819 return;
6821 default:
6822 fprintf (f, "????");
6823 return;
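/* Example output (added illustration): for
   (const (plus (symbol_ref "foo") (const_int 4))) the routine prints
   `foo'+0x4; any unhandled code prints "????".  */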
6827 /* Routines for manipulation of the constant pool. */
6829 /* ARM instructions cannot load a large constant directly into a
6830 register; they have to come from a pc relative load. The constant
6831 must therefore be placed in the addressable range of the pc
6832 relative load. Depending on the precise pc relative load
6833 instruction the range is somewhere between 256 bytes and 4k. This
6834 means that we often have to dump a constant inside a function, and
6835 generate code to branch around it.
6837 It is important to minimize this, since the branches will slow
6838 things down and make the code larger.
6840 Normally we can hide the table after an existing unconditional
6841 branch so that there is no interruption of the flow, but in the
6842 worst case the code looks like this:
6844 ldr rn, L1
6846 b L2
6847 align
6848 L1: .long value
6852 ldr rn, L3
6854 b L4
6855 align
6856 L3: .long value
6860 We fix this by performing a scan after scheduling, which notices
6861 which instructions need to have their operands fetched from the
6862 constant table and builds the table.
6864 The algorithm starts by building a table of all the constants that
6865 need fixing up and all the natural barriers in the function (places
6866 where a constant table can be dropped without breaking the flow).
6867 For each fixup we note how far the pc-relative replacement will be
6868 able to reach and the offset of the instruction into the function.
6870 Having built the table we then group the fixes together to form
6871 tables that are as large as possible (subject to addressing
6872 constraints) and emit each table of constants after the last
6873 barrier that is within range of all the instructions in the group.
6874 If a group does not contain a barrier, then we forcibly create one
6875 by inserting a jump instruction into the flow. Once the table has
6876 been inserted, the insns are then modified to reference the
6877 relevant entry in the pool.
6879 Possible enhancements to the algorithm (not implemented) are:
6881 1) For some processors and object formats, there may be benefit in
6882 aligning the pools to the start of cache lines; this alignment
6883 would need to be taken into account when calculating addressability
6884 of a pool. */
6886 /* These typedefs are located at the start of this file, so that
6887 they can be used in the prototypes there. This comment is to
6888 remind readers of that fact so that the following structures
6889 can be understood more easily.
6891 typedef struct minipool_node Mnode;
6892 typedef struct minipool_fixup Mfix; */
6894 struct minipool_node
6896 /* Doubly linked chain of entries. */
6897 Mnode * next;
6898 Mnode * prev;
6899 /* The maximum offset into the code that this entry can be placed. While
6900 pushing fixes for forward references, all entries are sorted in order
6901 of increasing max_address. */
6902 HOST_WIDE_INT max_address;
6903 /* Similarly for an entry inserted for a backwards ref. */
6904 HOST_WIDE_INT min_address;
6905 /* The number of fixes referencing this entry. This can become zero
6906 if we "unpush" an entry. In this case we ignore the entry when we
6907 come to emit the code. */
6908 int refcount;
6909 /* The offset from the start of the minipool. */
6910 HOST_WIDE_INT offset;
6911 /* The value in the table. */
6912 rtx value;
6913 /* The mode of value. */
6914 enum machine_mode mode;
6915 /* The size of the value. With iWMMXt enabled
6916 sizes > 4 also imply an alignment of 8-bytes. */
6917 int fix_size;
6920 struct minipool_fixup
6922 Mfix * next;
6923 rtx insn;
6924 HOST_WIDE_INT address;
6925 rtx * loc;
6926 enum machine_mode mode;
6927 int fix_size;
6928 rtx value;
6929 Mnode * minipool;
6930 HOST_WIDE_INT forwards;
6931 HOST_WIDE_INT backwards;
6934 /* Fixes less than a word need padding out to a word boundary. */
6935 #define MINIPOOL_FIX_SIZE(mode) \
6936 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
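/* So, for illustration, MINIPOOL_FIX_SIZE (QImode) and
   MINIPOOL_FIX_SIZE (SImode) are both 4, while MINIPOOL_FIX_SIZE
   (DImode) is 8 and, with iWMMXt, also implies 8-byte alignment.  */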
6938 static Mnode * minipool_vector_head;
6939 static Mnode * minipool_vector_tail;
6940 static rtx minipool_vector_label;
6942 /* The linked list of all minipool fixes required for this function. */
6943 Mfix * minipool_fix_head;
6944 Mfix * minipool_fix_tail;
6945 /* The fix entry for the current minipool, once it has been placed. */
6946 Mfix * minipool_barrier;
6948 /* Determines if INSN is the start of a jump table. Returns the end
6949 of the TABLE or NULL_RTX. */
6950 static rtx
6951 is_jump_table (rtx insn)
6953 rtx table;
6955 if (GET_CODE (insn) == JUMP_INSN
6956 && JUMP_LABEL (insn) != NULL
6957 && ((table = next_real_insn (JUMP_LABEL (insn)))
6958 == next_real_insn (insn))
6959 && table != NULL
6960 && GET_CODE (table) == JUMP_INSN
6961 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6962 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6963 return table;
6965 return NULL_RTX;
6968 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6969 #define JUMP_TABLES_IN_TEXT_SECTION 0
6970 #endif
6972 static HOST_WIDE_INT
6973 get_jump_table_size (rtx insn)
6975 /* ADDR_VECs only take room if read-only data goes into the text
6976 section. */
6977 if (JUMP_TABLES_IN_TEXT_SECTION
6978 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6979 || 1
6980 #endif
6983 rtx body = PATTERN (insn);
6984 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6986 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6989 return 0;
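/* For example (added illustration): an SImode ADDR_DIFF_VEC with ten
   entries accounts for 10 * 4 = 40 bytes when jump tables live in the
   text section, and for nothing otherwise.  */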
6992 /* Move a minipool fix MP from its current location to before MAX_MP.
6993 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6994 constraints may need updating. */
6995 static Mnode *
6996 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6997 HOST_WIDE_INT max_address)
6999 /* This should never be true and the code below assumes these are
7000 different. */
7001 if (mp == max_mp)
7002 abort ();
7004 if (max_mp == NULL)
7006 if (max_address < mp->max_address)
7007 mp->max_address = max_address;
7009 else
7011 if (max_address > max_mp->max_address - mp->fix_size)
7012 mp->max_address = max_mp->max_address - mp->fix_size;
7013 else
7014 mp->max_address = max_address;
7016 /* Unlink MP from its current position. Since max_mp is non-null,
7017 mp->prev must be non-null. */
7018 mp->prev->next = mp->next;
7019 if (mp->next != NULL)
7020 mp->next->prev = mp->prev;
7021 else
7022 minipool_vector_tail = mp->prev;
7024 /* Re-insert it before MAX_MP. */
7025 mp->next = max_mp;
7026 mp->prev = max_mp->prev;
7027 max_mp->prev = mp;
7029 if (mp->prev != NULL)
7030 mp->prev->next = mp;
7031 else
7032 minipool_vector_head = mp;
7035 /* Save the new entry. */
7036 max_mp = mp;
7038 /* Scan over the preceding entries and adjust their addresses as
7039 required. */
7040 while (mp->prev != NULL
7041 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7043 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7044 mp = mp->prev;
7047 return max_mp;
7050 /* Add a constant to the minipool for a forward reference. Returns the
7051 node added or NULL if the constant will not fit in this pool. */
7052 static Mnode *
7053 add_minipool_forward_ref (Mfix *fix)
7055 /* If set, max_mp is the first pool_entry that has a lower
7056 constraint than the one we are trying to add. */
7057 Mnode * max_mp = NULL;
7058 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7059 Mnode * mp;
7061 /* If this fix's address is greater than the address of the first
7062 entry, then we can't put the fix in this pool. We subtract the
7063 size of the current fix to ensure that if the table is fully
7064 packed we still have enough room to insert this value by shuffling
7065 the other fixes forwards. */
7066 if (minipool_vector_head &&
7067 fix->address >= minipool_vector_head->max_address - fix->fix_size)
7068 return NULL;
7070 /* Scan the pool to see if a constant with the same value has
7071 already been added. While we are doing this, also note the
7072 location where we must insert the constant if it doesn't already
7073 exist. */
7074 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7076 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7077 && fix->mode == mp->mode
7078 && (GET_CODE (fix->value) != CODE_LABEL
7079 || (CODE_LABEL_NUMBER (fix->value)
7080 == CODE_LABEL_NUMBER (mp->value)))
7081 && rtx_equal_p (fix->value, mp->value))
7083 /* More than one fix references this entry. */
7084 mp->refcount++;
7085 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7088 /* Note the insertion point if necessary. */
7089 if (max_mp == NULL
7090 && mp->max_address > max_address)
7091 max_mp = mp;
7093 /* If we are inserting an 8-byte aligned quantity and
7094 we have not already found an insertion point, then
7095 make sure that all such 8-byte aligned quantities are
7096 placed at the start of the pool. */
7097 if (ARM_DOUBLEWORD_ALIGN
7098 && max_mp == NULL
7099 && fix->fix_size == 8
7100 && mp->fix_size != 8)
7102 max_mp = mp;
7103 max_address = mp->max_address;
7107 /* The value is not currently in the minipool, so we need to create
7108 a new entry for it. If MAX_MP is NULL, the entry will be put on
7109 the end of the list since the placement is less constrained than
7110 any existing entry. Otherwise, we insert the new fix before
7111 MAX_MP and, if necessary, adjust the constraints on the other
7112 entries. */
7113 mp = xmalloc (sizeof (* mp));
7114 mp->fix_size = fix->fix_size;
7115 mp->mode = fix->mode;
7116 mp->value = fix->value;
7117 mp->refcount = 1;
7118 /* Not yet required for a backwards ref. */
7119 mp->min_address = -65536;
7121 if (max_mp == NULL)
7123 mp->max_address = max_address;
7124 mp->next = NULL;
7125 mp->prev = minipool_vector_tail;
7127 if (mp->prev == NULL)
7129 minipool_vector_head = mp;
7130 minipool_vector_label = gen_label_rtx ();
7132 else
7133 mp->prev->next = mp;
7135 minipool_vector_tail = mp;
7137 else
7139 if (max_address > max_mp->max_address - mp->fix_size)
7140 mp->max_address = max_mp->max_address - mp->fix_size;
7141 else
7142 mp->max_address = max_address;
7144 mp->next = max_mp;
7145 mp->prev = max_mp->prev;
7146 max_mp->prev = mp;
7147 if (mp->prev != NULL)
7148 mp->prev->next = mp;
7149 else
7150 minipool_vector_head = mp;
7153 /* Save the new entry. */
7154 max_mp = mp;
7156 /* Scan over the preceding entries and adjust their addresses as
7157 required. */
7158 while (mp->prev != NULL
7159 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7161 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7162 mp = mp->prev;
7165 return max_mp;
7168 static Mnode *
7169 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7170 HOST_WIDE_INT min_address)
7172 HOST_WIDE_INT offset;
7174 /* This should never be true, and the code below assumes these are
7175 different. */
7176 if (mp == min_mp)
7177 abort ();
7179 if (min_mp == NULL)
7181 if (min_address > mp->min_address)
7182 mp->min_address = min_address;
7184 else
7186 /* We will adjust this below if it is too loose. */
7187 mp->min_address = min_address;
7189 /* Unlink MP from its current position. Since min_mp is non-null,
7190 mp->next must be non-null. */
7191 mp->next->prev = mp->prev;
7192 if (mp->prev != NULL)
7193 mp->prev->next = mp->next;
7194 else
7195 minipool_vector_head = mp->next;
7197 /* Reinsert it after MIN_MP. */
7198 mp->prev = min_mp;
7199 mp->next = min_mp->next;
7200 min_mp->next = mp;
7201 if (mp->next != NULL)
7202 mp->next->prev = mp;
7203 else
7204 minipool_vector_tail = mp;
7207 min_mp = mp;
7209 offset = 0;
7210 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7212 mp->offset = offset;
7213 if (mp->refcount > 0)
7214 offset += mp->fix_size;
7216 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7217 mp->next->min_address = mp->min_address + mp->fix_size;
7220 return min_mp;
7223 /* Add a constant to the minipool for a backward reference. Returns the
7224 node added or NULL if the constant will not fit in this pool.
7226 Note that the code for insertion for a backwards reference can be
7227 somewhat confusing because the calculated offsets for each fix do
7228 not take into account the size of the pool (which is still under
7229 construction). */
7230 static Mnode *
7231 add_minipool_backward_ref (Mfix *fix)
7233 /* If set, min_mp is the last pool_entry that has a lower constraint
7234 than the one we are trying to add. */
7235 Mnode *min_mp = NULL;
7236 /* This can be negative, since it is only a constraint. */
7237 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7238 Mnode *mp;
7240 /* If we can't reach the current pool from this insn, or if we can't
7241 insert this entry at the end of the pool without pushing other
7242 fixes out of range, then we don't try. This ensures that we
7243 can't fail later on. */
7244 if (min_address >= minipool_barrier->address
7245 || (minipool_vector_tail->min_address + fix->fix_size
7246 >= minipool_barrier->address))
7247 return NULL;
7249 /* Scan the pool to see if a constant with the same value has
7250 already been added. While we are doing this, also note the
7251 location where we must insert the constant if it doesn't already
7252 exist. */
7253 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7255 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7256 && fix->mode == mp->mode
7257 && (GET_CODE (fix->value) != CODE_LABEL
7258 || (CODE_LABEL_NUMBER (fix->value)
7259 == CODE_LABEL_NUMBER (mp->value)))
7260 && rtx_equal_p (fix->value, mp->value)
7261 /* Check that there is enough slack to move this entry to the
7262 end of the table (this is conservative). */
7263 && (mp->max_address
7264 > (minipool_barrier->address
7265 + minipool_vector_tail->offset
7266 + minipool_vector_tail->fix_size)))
7268 mp->refcount++;
7269 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7272 if (min_mp != NULL)
7273 mp->min_address += fix->fix_size;
7274 else
7276 /* Note the insertion point if necessary. */
7277 if (mp->min_address < min_address)
7279 /* For now, we do not allow the insertion of nodes requiring 8-byte
7280 alignment anywhere but at the start of the pool. */
7281 if (ARM_DOUBLEWORD_ALIGN
7282 && fix->fix_size == 8 && mp->fix_size != 8)
7283 return NULL;
7284 else
7285 min_mp = mp;
7287 else if (mp->max_address
7288 < minipool_barrier->address + mp->offset + fix->fix_size)
7290 /* Inserting before this entry would push the fix beyond
7291 its maximum address (which can happen if we have
7292 re-located a forwards fix); force the new fix to come
7293 after it. */
7294 min_mp = mp;
7295 min_address = mp->min_address + fix->fix_size;
7297 /* If we are inserting an 8-byte aligned quantity and
7298 we have not already found an insertion point, then
7299 make sure that all such 8-byte aligned quantities are
7300 placed at the start of the pool. */
7301 else if (ARM_DOUBLEWORD_ALIGN
7302 && min_mp == NULL
7303 && fix->fix_size == 8
7304 && mp->fix_size < 8)
7306 min_mp = mp;
7307 min_address = mp->min_address + fix->fix_size;
7312 /* We need to create a new entry. */
7313 mp = xmalloc (sizeof (* mp));
7314 mp->fix_size = fix->fix_size;
7315 mp->mode = fix->mode;
7316 mp->value = fix->value;
7317 mp->refcount = 1;
7318 mp->max_address = minipool_barrier->address + 65536;
7320 mp->min_address = min_address;
7322 if (min_mp == NULL)
7324 mp->prev = NULL;
7325 mp->next = minipool_vector_head;
7327 if (mp->next == NULL)
7329 minipool_vector_tail = mp;
7330 minipool_vector_label = gen_label_rtx ();
7332 else
7333 mp->next->prev = mp;
7335 minipool_vector_head = mp;
7337 else
7339 mp->next = min_mp->next;
7340 mp->prev = min_mp;
7341 min_mp->next = mp;
7343 if (mp->next != NULL)
7344 mp->next->prev = mp;
7345 else
7346 minipool_vector_tail = mp;
7349 /* Save the new entry. */
7350 min_mp = mp;
7352 if (mp->prev)
7353 mp = mp->prev;
7354 else
7355 mp->offset = 0;
7357 /* Scan over the following entries and adjust their offsets. */
7358 while (mp->next != NULL)
7360 if (mp->next->min_address < mp->min_address + mp->fix_size)
7361 mp->next->min_address = mp->min_address + mp->fix_size;
7363 if (mp->refcount)
7364 mp->next->offset = mp->offset + mp->fix_size;
7365 else
7366 mp->next->offset = mp->offset;
7368 mp = mp->next;
7371 return min_mp;
7374 static void
7375 assign_minipool_offsets (Mfix *barrier)
7377 HOST_WIDE_INT offset = 0;
7378 Mnode *mp;
7380 minipool_barrier = barrier;
7382 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7384 mp->offset = offset;
7386 if (mp->refcount > 0)
7387 offset += mp->fix_size;
7391 /* Output the literal table */
7392 static void
7393 dump_minipool (rtx scan)
7395 Mnode * mp;
7396 Mnode * nmp;
7397 int align64 = 0;
7399 if (ARM_DOUBLEWORD_ALIGN)
7400 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7401 if (mp->refcount > 0 && mp->fix_size == 8)
7403 align64 = 1;
7404 break;
7407 if (dump_file)
7408 fprintf (dump_file,
7409 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7410 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7412 scan = emit_label_after (gen_label_rtx (), scan);
7413 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7414 scan = emit_label_after (minipool_vector_label, scan);
7416 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7418 if (mp->refcount > 0)
7420 if (dump_file)
7422 fprintf (dump_file,
7423 ";; Offset %u, min %ld, max %ld ",
7424 (unsigned) mp->offset, (unsigned long) mp->min_address,
7425 (unsigned long) mp->max_address);
7426 arm_print_value (dump_file, mp->value);
7427 fputc ('\n', dump_file);
7430 switch (mp->fix_size)
7432 #ifdef HAVE_consttable_1
7433 case 1:
7434 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7435 break;
7437 #endif
7438 #ifdef HAVE_consttable_2
7439 case 2:
7440 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7441 break;
7443 #endif
7444 #ifdef HAVE_consttable_4
7445 case 4:
7446 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7447 break;
7449 #endif
7450 #ifdef HAVE_consttable_8
7451 case 8:
7452 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7453 break;
7455 #endif
7456 default:
7457 abort ();
7458 break;
7462 nmp = mp->next;
7463 free (mp);
7466 minipool_vector_head = minipool_vector_tail = NULL;
7467 scan = emit_insn_after (gen_consttable_end (), scan);
7468 scan = emit_barrier_after (scan);
7471 /* Return the cost of forcibly inserting a barrier after INSN. */
7472 static int
7473 arm_barrier_cost (rtx insn)
7475 /* Basing the location of the pool on the loop depth is preferable,
7476 but at the moment, the basic block information seems to be
7477 corrupt by this stage of the compilation. */
7478 int base_cost = 50;
7479 rtx next = next_nonnote_insn (insn);
7481 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7482 base_cost -= 20;
7484 switch (GET_CODE (insn))
7486 case CODE_LABEL:
7487 /* It will always be better to place the table before the label, rather
7488 than after it. */
7489 return 50;
7491 case INSN:
7492 case CALL_INSN:
7493 return base_cost;
7495 case JUMP_INSN:
7496 return base_cost - 10;
7498 default:
7499 return base_cost + 10;
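/* Cost sketch (added illustration): a JUMP_INSN whose next non-note
   insn is a CODE_LABEL scores 50 - 20 - 10 = 20, the cheapest common
   location, whereas dropping the pool directly after a CODE_LABEL
   always costs 50; the search in create_fix_barrier therefore
   gravitates towards existing jumps.  */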
7503 /* Find the best place in the insn stream in the range
7504 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7505 Create the barrier by inserting a jump and add a new fix entry for
7506 it. */
7507 static Mfix *
7508 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7510 HOST_WIDE_INT count = 0;
7511 rtx barrier;
7512 rtx from = fix->insn;
7513 rtx selected = from;
7514 int selected_cost;
7515 HOST_WIDE_INT selected_address;
7516 Mfix * new_fix;
7517 HOST_WIDE_INT max_count = max_address - fix->address;
7518 rtx label = gen_label_rtx ();
7520 selected_cost = arm_barrier_cost (from);
7521 selected_address = fix->address;
7523 while (from && count < max_count)
7525 rtx tmp;
7526 int new_cost;
7528 /* This code shouldn't have been called if there was a natural barrier
7529 within range. */
7530 if (GET_CODE (from) == BARRIER)
7531 abort ();
7533 /* Count the length of this insn. */
7534 count += get_attr_length (from);
7536 /* If there is a jump table, add its length. */
7537 tmp = is_jump_table (from);
7538 if (tmp != NULL)
7540 count += get_jump_table_size (tmp);
7542 /* Jump tables aren't in a basic block, so base the cost on
7543 the dispatch insn. If we select this location, we will
7544 still put the pool after the table. */
7545 new_cost = arm_barrier_cost (from);
7547 if (count < max_count && new_cost <= selected_cost)
7549 selected = tmp;
7550 selected_cost = new_cost;
7551 selected_address = fix->address + count;
7554 /* Continue after the dispatch table. */
7555 from = NEXT_INSN (tmp);
7556 continue;
7559 new_cost = arm_barrier_cost (from);
7561 if (count < max_count && new_cost <= selected_cost)
7563 selected = from;
7564 selected_cost = new_cost;
7565 selected_address = fix->address + count;
7568 from = NEXT_INSN (from);
7571 /* Create a new JUMP_INSN that branches around a barrier. */
7572 from = emit_jump_insn_after (gen_jump (label), selected);
7573 JUMP_LABEL (from) = label;
7574 barrier = emit_barrier_after (from);
7575 emit_label_after (label, barrier);
7577 /* Create a minipool barrier entry for the new barrier. */
7578 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7579 new_fix->insn = barrier;
7580 new_fix->address = selected_address;
7581 new_fix->next = fix->next;
7582 fix->next = new_fix;
7584 return new_fix;
7587 /* Record that there is a natural barrier in the insn stream at
7588 ADDRESS. */
7589 static void
7590 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7592 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7594 fix->insn = insn;
7595 fix->address = address;
7597 fix->next = NULL;
7598 if (minipool_fix_head != NULL)
7599 minipool_fix_tail->next = fix;
7600 else
7601 minipool_fix_head = fix;
7603 minipool_fix_tail = fix;
7606 /* Record INSN, which will need fixing up to load a value from the
7607 minipool. ADDRESS is the offset of the insn since the start of the
7608 function; LOC is a pointer to the part of the insn which requires
7609 fixing; VALUE is the constant that must be loaded, which is of type
7610 MODE. */
7611 static void
7612 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7613 enum machine_mode mode, rtx value)
7615 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7617 #ifdef AOF_ASSEMBLER
7618 /* PIC symbol references need to be converted into offsets into the
7619 based area. */
7620 /* XXX This shouldn't be done here. */
7621 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7622 value = aof_pic_entry (value);
7623 #endif /* AOF_ASSEMBLER */
7625 fix->insn = insn;
7626 fix->address = address;
7627 fix->loc = loc;
7628 fix->mode = mode;
7629 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7630 fix->value = value;
7631 fix->forwards = get_attr_pool_range (insn);
7632 fix->backwards = get_attr_neg_pool_range (insn);
7633 fix->minipool = NULL;
7635 /* If an insn doesn't have a range defined for it, then it isn't
7636 expecting to be reworked by this code. Better to abort now than
7637 to generate duff assembly code. */
7638 if (fix->forwards == 0 && fix->backwards == 0)
7639 abort ();
7641 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7642 So there might be an empty word before the start of the pool.
7643 Hence we reduce the forward range by 4 to allow for this
7644 possibility. */
7645 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7646 fix->forwards -= 4;
7648 if (dump_file)
7650 fprintf (dump_file,
7651 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7652 GET_MODE_NAME (mode),
7653 INSN_UID (insn), (unsigned long) address,
7654 -1 * (long)fix->backwards, (long)fix->forwards);
7655 arm_print_value (dump_file, fix->value);
7656 fprintf (dump_file, "\n");
7659 /* Add it to the chain of fixes. */
7660 fix->next = NULL;
7662 if (minipool_fix_head != NULL)
7663 minipool_fix_tail->next = fix;
7664 else
7665 minipool_fix_head = fix;
7667 minipool_fix_tail = fix;
7670 /* Scan INSN and note any of its operands that need fixing.
7671 If DO_PUSHES is false we do not actually push any of the fixups
7672 needed. The function returns TRUE if any fixups were needed/pushed.
7673 This is used by arm_memory_load_p() which needs to know about loads
7674 of constants that will be converted into minipool loads. */
7675 static bool
7676 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7678 bool result = false;
7679 int opno;
7681 extract_insn (insn);
7683 if (!constrain_operands (1))
7684 fatal_insn_not_found (insn);
7686 if (recog_data.n_alternatives == 0)
7687 return false;
7689 /* Fill in recog_op_alt with information about the constraints of this insn. */
7690 preprocess_constraints ();
7692 for (opno = 0; opno < recog_data.n_operands; opno++)
7694 /* Things we need to fix can only occur in inputs. */
7695 if (recog_data.operand_type[opno] != OP_IN)
7696 continue;
7698 /* If this alternative is a memory reference, then any mention
7699 of constants in this alternative is really to fool reload
7700 into allowing us to accept one there. We need to fix them up
7701 now so that we output the right code. */
7702 if (recog_op_alt[opno][which_alternative].memory_ok)
7704 rtx op = recog_data.operand[opno];
7706 if (CONSTANT_P (op))
7708 if (do_pushes)
7709 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7710 recog_data.operand_mode[opno], op);
7711 result = true;
7713 else if (GET_CODE (op) == MEM
7714 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7715 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7717 if (do_pushes)
7719 rtx cop = avoid_constant_pool_reference (op);
7721 /* Casting the address of something to a mode narrower
7722 than a word can cause avoid_constant_pool_reference()
7723 to return the pool reference itself. That's no good to
7724 us here. Let's just hope that we can use the
7725 constant pool value directly. */
7726 if (op == cop)
7727 cop = get_pool_constant (XEXP (op, 0));
7729 push_minipool_fix (insn, address,
7730 recog_data.operand_loc[opno],
7731 recog_data.operand_mode[opno], cop);
7734 result = true;
7739 return result;
7742 /* GCC puts the pool in the wrong place for ARM, since we can only
7743 load addresses a limited distance around the pc. We do some
7744 special munging to move the constant pool values to the correct
7745 point in the code. */
7746 static void
7747 arm_reorg (void)
7749 rtx insn;
7750 HOST_WIDE_INT address = 0;
7751 Mfix * fix;
7753 minipool_fix_head = minipool_fix_tail = NULL;
7755 /* The first insn must always be a note, or the code below won't
7756 scan it properly. */
7757 insn = get_insns ();
7758 if (GET_CODE (insn) != NOTE)
7759 abort ();
7761 /* Scan all the insns and record the operands that will need fixing. */
7762 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7764 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7765 && (arm_cirrus_insn_p (insn)
7766 || GET_CODE (insn) == JUMP_INSN
7767 || arm_memory_load_p (insn)))
7768 cirrus_reorg (insn);
7770 if (GET_CODE (insn) == BARRIER)
7771 push_minipool_barrier (insn, address);
7772 else if (INSN_P (insn))
7774 rtx table;
7776 note_invalid_constants (insn, address, true);
7777 address += get_attr_length (insn);
7779 /* If the insn is a vector jump, add the size of the table
7780 and skip the table. */
7781 if ((table = is_jump_table (insn)) != NULL)
7783 address += get_jump_table_size (table);
7784 insn = table;
7789 fix = minipool_fix_head;
7791 /* Now scan the fixups and perform the required changes. */
7792 while (fix)
7794 Mfix * ftmp;
7795 Mfix * fdel;
7796 Mfix * last_added_fix;
7797 Mfix * last_barrier = NULL;
7798 Mfix * this_fix;
7800 /* Skip any further barriers before the next fix. */
7801 while (fix && GET_CODE (fix->insn) == BARRIER)
7802 fix = fix->next;
7804 /* No more fixes. */
7805 if (fix == NULL)
7806 break;
7808 last_added_fix = NULL;
7810 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7812 if (GET_CODE (ftmp->insn) == BARRIER)
7814 if (ftmp->address >= minipool_vector_head->max_address)
7815 break;
7817 last_barrier = ftmp;
7819 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7820 break;
7822 last_added_fix = ftmp; /* Keep track of the last fix added. */
7825 /* If we found a barrier, drop back to that; any fixes that we
7826 could have reached but come after the barrier will now go in
7827 the next mini-pool. */
7828 if (last_barrier != NULL)
7830 /* Reduce the refcount for those fixes that won't go into this
7831 pool after all. */
7832 for (fdel = last_barrier->next;
7833 fdel && fdel != ftmp;
7834 fdel = fdel->next)
7836 fdel->minipool->refcount--;
7837 fdel->minipool = NULL;
7840 ftmp = last_barrier;
7842 else
7844 /* ftmp is the first fix that we can't fit into this pool and
7845 there are no natural barriers that we could use. Insert a
7846 new barrier in the code somewhere between the previous
7847 fix and this one, and arrange to jump around it. */
7848 HOST_WIDE_INT max_address;
7850 /* The last item on the list of fixes must be a barrier, so
7851 we can never run off the end of the list of fixes without
7852 last_barrier being set. */
7853 if (ftmp == NULL)
7854 abort ();
7856 max_address = minipool_vector_head->max_address;
7857 /* Check that there isn't another fix that is in range that
7858 we couldn't fit into this pool because the pool was
7859 already too large: we need to put the pool before such an
7860 instruction. */
7861 if (ftmp->address < max_address)
7862 max_address = ftmp->address;
7864 last_barrier = create_fix_barrier (last_added_fix, max_address);
7867 assign_minipool_offsets (last_barrier);
7869 while (ftmp)
7871 if (GET_CODE (ftmp->insn) != BARRIER
7872 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7873 == NULL))
7874 break;
7876 ftmp = ftmp->next;
7879 /* Scan over the fixes we have identified for this pool, fixing them
7880 up and adding the constants to the pool itself. */
7881 for (this_fix = fix; this_fix && ftmp != this_fix;
7882 this_fix = this_fix->next)
7883 if (GET_CODE (this_fix->insn) != BARRIER)
7885 rtx addr
7886 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7887 minipool_vector_label),
7888 this_fix->minipool->offset);
7889 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7892 dump_minipool (last_barrier->insn);
7893 fix = ftmp;
7896 /* From now on we must synthesize any constants that we can't handle
7897 directly. This can happen if the RTL gets split during final
7898 instruction generation. */
7899 after_arm_reorg = 1;
7901 /* Free the minipool memory. */
7902 obstack_free (&minipool_obstack, minipool_startobj);
7905 /* Routines to output assembly language. */
7907 /* If the rtx is a valid FPA immediate constant then return its string
7908 representation. In this way we can ensure that valid double constants
7909 are generated even when cross compiling. */
7910 const char *
7911 fp_immediate_constant (rtx x)
7913 REAL_VALUE_TYPE r;
7914 int i;
7916 if (!fp_consts_inited)
7917 init_fp_table ();
7919 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7920 for (i = 0; i < 8; i++)
7921 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7922 return strings_fp[i];
7924 abort ();
7927 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7928 static const char *
7929 fp_const_from_val (REAL_VALUE_TYPE *r)
7931 int i;
7933 if (!fp_consts_inited)
7934 init_fp_table ();
7936 for (i = 0; i < 8; i++)
7937 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7938 return strings_fp[i];
7940 abort ();
7943 /* Output the operands of a LDM/STM instruction to STREAM.
7944 MASK is the ARM register set mask of which only bits 0-15 are important.
7945 REG is the base register, either the frame pointer or the stack pointer,
7946 INSTR is the possibly suffixed load or store instruction. */
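/* For instance (added illustration; the exact INSTR string depends on
   the caller): with an INSTR such as "ldmfd\t%r!", REG the stack
   pointer and MASK selecting r4, r5 and pc, this prints a line like
        ldmfd   sp!, {r4, r5, pc}  */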
7947 static void
7948 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7950 int i;
7951 int not_first = FALSE;
7953 fputc ('\t', stream);
7954 asm_fprintf (stream, instr, reg);
7955 fputs (", {", stream);
7957 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7958 if (mask & (1 << i))
7960 if (not_first)
7961 fprintf (stream, ", ");
7963 asm_fprintf (stream, "%r", i);
7964 not_first = TRUE;
7967 fprintf (stream, "}\n");
7971 /* Output a FLDMX instruction to STREAM.
7972 BASE is the register containing the address.
7973 REG and COUNT specify the register range.
7974 Extra registers may be added to avoid hardware bugs. */
7976 static void
7977 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7979 int i;
7981 /* Workaround ARM10 VFPr1 bug. */
7982 if (count == 2 && !arm_arch6)
7984 if (reg == 15)
7985 reg--;
7986 count++;
7989 fputc ('\t', stream);
7990 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7992 for (i = reg; i < reg + count; i++)
7994 if (i > reg)
7995 fputs (", ", stream);
7996 asm_fprintf (stream, "d%d", i);
7998 fputs ("}\n", stream);
8003 /* Output the assembly for a store multiple. */
8005 const char *
8006 vfp_output_fstmx (rtx * operands)
8008 char pattern[100];
8009 int p;
8010 int base;
8011 int i;
8013 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8014 p = strlen (pattern);
8016 if (GET_CODE (operands[1]) != REG)
8017 abort ();
8019 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8020 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8022 p += sprintf (&pattern[p], ", d%d", base + i);
8024 strcpy (&pattern[p], "}");
8026 output_asm_insn (pattern, operands);
8027 return "";
8031 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8032 number of bytes pushed. */
8034 static int
8035 vfp_emit_fstmx (int base_reg, int count)
8037 rtx par;
8038 rtx dwarf;
8039 rtx tmp, reg;
8040 int i;
8042 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8043 register pairs are stored by a store multiple insn. We avoid this
8044 by pushing an extra pair. */
8045 if (count == 2 && !arm_arch6)
8047 if (base_reg == LAST_VFP_REGNUM - 3)
8048 base_reg -= 2;
8049 count++;
8052 /* ??? The frame layout is implementation defined. We describe
8053 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8054 We really need some way of representing the whole block so that the
8055 unwinder can figure it out at runtime. */
8056 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8057 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8059 reg = gen_rtx_REG (DFmode, base_reg);
8060 base_reg += 2;
8062 XVECEXP (par, 0, 0)
8063 = gen_rtx_SET (VOIDmode,
8064 gen_rtx_MEM (BLKmode,
8065 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8066 gen_rtx_UNSPEC (BLKmode,
8067 gen_rtvec (1, reg),
8068 UNSPEC_PUSH_MULT));
8070 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8071 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8072 GEN_INT (-(count * 8 + 4))));
8073 RTX_FRAME_RELATED_P (tmp) = 1;
8074 XVECEXP (dwarf, 0, 0) = tmp;
8076 tmp = gen_rtx_SET (VOIDmode,
8077 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8078 reg);
8079 RTX_FRAME_RELATED_P (tmp) = 1;
8080 XVECEXP (dwarf, 0, 1) = tmp;
8082 for (i = 1; i < count; i++)
8084 reg = gen_rtx_REG (DFmode, base_reg);
8085 base_reg += 2;
8086 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8088 tmp = gen_rtx_SET (VOIDmode,
8089 gen_rtx_MEM (DFmode,
8090 gen_rtx_PLUS (SImode,
8091 stack_pointer_rtx,
8092 GEN_INT (i * 8))),
8093 reg);
8094 RTX_FRAME_RELATED_P (tmp) = 1;
8095 XVECEXP (dwarf, 0, i + 1) = tmp;
8098 par = emit_insn (par);
8099 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8100 REG_NOTES (par));
8101 RTX_FRAME_RELATED_P (par) = 1;
8103 return count * 8 + 4;
8107 /* Output a 'call' insn. */
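/* For instance (illustrative only), on a core without interworking
support (pre-v4t) a call through r2 comes out as

	mov	lr, pc
	mov	pc, r2

Reading PC yields the address of the current insn plus 8, so LR ends
up holding the address of the insn after the "mov pc", which is
exactly the required return address.  */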
8108 const char *
8109 output_call (rtx *operands)
8111 if (arm_arch5)
8112 abort (); /* Patterns should call blx <reg> directly. */
8114 /* Handle calls to lr using ip (which may be clobbered in the subroutine anyway). */
8115 if (REGNO (operands[0]) == LR_REGNUM)
8117 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8118 output_asm_insn ("mov%?\t%0, %|lr", operands);
8121 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8123 if (TARGET_INTERWORK || arm_arch4t)
8124 output_asm_insn ("bx%?\t%0", operands);
8125 else
8126 output_asm_insn ("mov%?\t%|pc, %0", operands);
8128 return "";
8131 /* Output a 'call' insn that is a reference in memory. */
8132 const char *
8133 output_call_mem (rtx *operands)
8135 if (TARGET_INTERWORK && !arm_arch5)
8137 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8138 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8139 output_asm_insn ("bx%?\t%|ip", operands);
8141 else if (regno_use_in (LR_REGNUM, operands[0]))
8143 /* LR is used in the memory address. We load the address in the
8144 first instruction. It's safe to use IP as the target of the
8145 load since the call will kill it anyway. */
8146 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8147 if (arm_arch5)
8148 output_asm_insn ("blx%?\t%|ip", operands);
8149 else
8151 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8152 if (arm_arch4t)
8153 output_asm_insn ("bx%?\t%|ip", operands);
8154 else
8155 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8158 else
8160 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8161 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8164 return "";
8168 /* Output a move from arm registers to an fpa register.
8169 OPERANDS[0] is an fpa register.
8170 OPERANDS[1] is the first of three consecutive arm registers. */
8171 const char *
8172 output_mov_long_double_fpa_from_arm (rtx *operands)
8174 int arm_reg0 = REGNO (operands[1]);
8175 rtx ops[3];
8177 if (arm_reg0 == IP_REGNUM)
8178 abort ();
8180 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8181 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8182 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8184 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8185 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8187 return "";
8190 /* Output a move from an fpa register to arm registers.
8191 OPERANDS[0] is the first of three consecutive arm registers.
8192 OPERANDS[1] is an fpa register. */
8193 const char *
8194 output_mov_long_double_arm_from_fpa (rtx *operands)
8196 int arm_reg0 = REGNO (operands[0]);
8197 rtx ops[3];
8199 if (arm_reg0 == IP_REGNUM)
8200 abort ();
8202 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8203 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8204 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8206 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8207 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8208 return "";
8211 /* Output a move from arm registers to arm registers of a long double.
8212 OPERANDS[0] is the destination.
8213 OPERANDS[1] is the source. */
8214 const char *
8215 output_mov_long_double_arm_from_arm (rtx *operands)
8217 /* We have to be careful here because the two might overlap. */
8218 int dest_start = REGNO (operands[0]);
8219 int src_start = REGNO (operands[1]);
8220 rtx ops[2];
8221 int i;
8223 if (dest_start < src_start)
8225 for (i = 0; i < 3; i++)
8227 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8228 ops[1] = gen_rtx_REG (SImode, src_start + i);
8229 output_asm_insn ("mov%?\t%0, %1", ops);
8232 else
8234 for (i = 2; i >= 0; i--)
8236 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8237 ops[1] = gen_rtx_REG (SImode, src_start + i);
8238 output_asm_insn ("mov%?\t%0, %1", ops);
8242 return "";
8246 /* Output a move from arm registers to an fpa register.
8247 OPERANDS[0] is an fpa register.
8248 OPERANDS[1] is the first register of an arm register pair. */
8249 const char *
8250 output_mov_double_fpa_from_arm (rtx *operands)
8252 int arm_reg0 = REGNO (operands[1]);
8253 rtx ops[2];
8255 if (arm_reg0 == IP_REGNUM)
8256 abort ();
8258 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8259 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8260 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8261 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8262 return "";
8265 /* Output a move from an fpa register to arm registers.
8266 OPERANDS[0] is the first register of an arm register pair.
8267 OPERANDS[1] is an fpa register. */
8268 const char *
8269 output_mov_double_arm_from_fpa (rtx *operands)
8271 int arm_reg0 = REGNO (operands[0]);
8272 rtx ops[2];
8274 if (arm_reg0 == IP_REGNUM)
8275 abort ();
8277 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8278 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8279 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8280 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8281 return "";
8284 /* Output a move between double words.
8285 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8286 or MEM<-REG and all MEMs must be offsettable addresses. */
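/* For instance (illustrative only), moving a DImode value held at the
address in r2 into the r0/r1 pair takes the REG case of the MEM switch
below and emits "ldmia r2, {r0, r1}".  */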
8287 const char *
8288 output_move_double (rtx *operands)
8290 enum rtx_code code0 = GET_CODE (operands[0]);
8291 enum rtx_code code1 = GET_CODE (operands[1]);
8292 rtx otherops[3];
8294 if (code0 == REG)
8296 int reg0 = REGNO (operands[0]);
8298 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8300 if (code1 == REG)
8302 int reg1 = REGNO (operands[1]);
8303 if (reg1 == IP_REGNUM)
8304 abort ();
8306 /* Ensure the second source is not overwritten. */
8307 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8308 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8309 else
8310 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8312 else if (code1 == CONST_VECTOR)
8314 HOST_WIDE_INT hint = 0;
8316 switch (GET_MODE (operands[1]))
8318 case V2SImode:
8319 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8320 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8321 break;
8323 case V4HImode:
8324 if (BYTES_BIG_ENDIAN)
8326 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8327 hint <<= 16;
8328 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8330 else
8332 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8333 hint <<= 16;
8334 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8337 otherops[1] = GEN_INT (hint);
8338 hint = 0;
8340 if (BYTES_BIG_ENDIAN)
8342 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8343 hint <<= 16;
8344 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8346 else
8348 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8349 hint <<= 16;
8350 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8353 operands[1] = GEN_INT (hint);
8354 break;
8356 case V8QImode:
8357 if (BYTES_BIG_ENDIAN)
8359 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8360 hint <<= 8;
8361 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8362 hint <<= 8;
8363 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8364 hint <<= 8;
8365 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8367 else
8369 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8370 hint <<= 8;
8371 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8372 hint <<= 8;
8373 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8374 hint <<= 8;
8375 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8378 otherops[1] = GEN_INT (hint);
8379 hint = 0;
8381 if (BYTES_BIG_ENDIAN)
8383 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8384 hint <<= 8;
8385 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8386 hint <<= 8;
8387 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8388 hint <<= 8;
8389 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8391 else
8393 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8394 hint <<= 8;
8395 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8396 hint <<= 8;
8397 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8398 hint <<= 8;
8399 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8402 operands[1] = GEN_INT (hint);
8403 break;
8405 default:
8406 abort ();
8408 output_mov_immediate (operands);
8409 output_mov_immediate (otherops);
8411 else if (code1 == CONST_DOUBLE)
8413 if (GET_MODE (operands[1]) == DFmode)
8415 REAL_VALUE_TYPE r;
8416 long l[2];
8418 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8419 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8420 otherops[1] = GEN_INT (l[1]);
8421 operands[1] = GEN_INT (l[0]);
8423 else if (GET_MODE (operands[1]) != VOIDmode)
8424 abort ();
8425 else if (WORDS_BIG_ENDIAN)
8427 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8428 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8430 else
8432 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8433 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8436 output_mov_immediate (operands);
8437 output_mov_immediate (otherops);
8439 else if (code1 == CONST_INT)
8441 #if HOST_BITS_PER_WIDE_INT > 32
8442 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8443 what the upper word is. */
8444 if (WORDS_BIG_ENDIAN)
8446 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8447 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8449 else
8451 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8452 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8454 #else
8455 /* Sign extend the intval into the high-order word. */
8456 if (WORDS_BIG_ENDIAN)
8458 otherops[1] = operands[1];
8459 operands[1] = (INTVAL (operands[1]) < 0
8460 ? constm1_rtx : const0_rtx);
8462 else
8463 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8464 #endif
8465 output_mov_immediate (otherops);
8466 output_mov_immediate (operands);
8468 else if (code1 == MEM)
8470 switch (GET_CODE (XEXP (operands[1], 0)))
8472 case REG:
8473 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8474 break;
8476 case PRE_INC:
8477 if (!TARGET_LDRD)
8478 abort (); /* Should never happen now. */
8479 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8480 break;
8482 case PRE_DEC:
8483 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8484 break;
8486 case POST_INC:
8487 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8488 break;
8490 case POST_DEC:
8491 if (!TARGET_LDRD)
8492 abort (); /* Should never happen now. */
8493 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8494 break;
8496 case PRE_MODIFY:
8497 case POST_MODIFY:
8498 otherops[0] = operands[0];
8499 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8500 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8502 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8504 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8506 /* Registers overlap so split out the increment. */
8507 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8508 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8510 else
8511 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8513 else
8515 /* We only allow constant increments, so this is safe. */
8516 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8518 break;
8520 case LABEL_REF:
8521 case CONST:
8522 output_asm_insn ("adr%?\t%0, %1", operands);
8523 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8524 break;
8526 default:
8527 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8528 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8530 otherops[0] = operands[0];
8531 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8532 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8534 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8536 if (GET_CODE (otherops[2]) == CONST_INT)
8538 switch ((int) INTVAL (otherops[2]))
8540 case -8:
8541 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8542 return "";
8543 case -4:
8544 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8545 return "";
8546 case 4:
8547 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8548 return "";
8551 if (TARGET_LDRD
8552 && (GET_CODE (otherops[2]) == REG
8553 || (GET_CODE (otherops[2]) == CONST_INT
8554 && INTVAL (otherops[2]) > -256
8555 && INTVAL (otherops[2]) < 256)))
8557 if (reg_overlap_mentioned_p (otherops[0],
8558 otherops[2]))
8560 /* Swap base and index registers over to
8561 avoid a conflict. */
8562 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8563 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8566 /* If both registers conflict, it will usually
8567 have been fixed by a splitter. */
8568 if (reg_overlap_mentioned_p (otherops[0],
8569 otherops[2]))
8571 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8572 output_asm_insn ("ldr%?d\t%0, [%1]",
8573 otherops);
8574 return "";
8576 else
8578 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8579 otherops);
8580 return "";
8583 if (GET_CODE (otherops[2]) == CONST_INT)
8585 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8586 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8587 else
8588 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8590 else
8591 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8593 else
8594 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8596 return "ldm%?ia\t%0, %M0";
8598 else
8600 otherops[1] = adjust_address (operands[1], SImode, 4);
8601 /* Take care of overlapping base/data reg. */
8602 if (reg_mentioned_p (operands[0], operands[1]))
8604 output_asm_insn ("ldr%?\t%0, %1", otherops);
8605 output_asm_insn ("ldr%?\t%0, %1", operands);
8607 else
8609 output_asm_insn ("ldr%?\t%0, %1", operands);
8610 output_asm_insn ("ldr%?\t%0, %1", otherops);
8615 else
8616 abort (); /* Constraints should prevent this. */
8618 else if (code0 == MEM && code1 == REG)
8620 if (REGNO (operands[1]) == IP_REGNUM)
8621 abort ();
8623 switch (GET_CODE (XEXP (operands[0], 0)))
8625 case REG:
8626 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8627 break;
8629 case PRE_INC:
8630 if (!TARGET_LDRD)
8631 abort (); /* Should never happen now. */
8632 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8633 break;
8635 case PRE_DEC:
8636 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8637 break;
8639 case POST_INC:
8640 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8641 break;
8643 case POST_DEC:
8644 if (!TARGET_LDRD)
8645 abort (); /* Should never happen now. */
8646 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8647 break;
8649 case PRE_MODIFY:
8650 case POST_MODIFY:
8651 otherops[0] = operands[1];
8652 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8653 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8655 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8656 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8657 else
8658 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8659 break;
8661 case PLUS:
8662 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8663 if (GET_CODE (otherops[2]) == CONST_INT)
8665 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8667 case -8:
8668 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8669 return "";
8671 case -4:
8672 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8673 return "";
8675 case 4:
8676 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8677 return "";
8680 if (TARGET_LDRD
8681 && (GET_CODE (otherops[2]) == REG
8682 || (GET_CODE (otherops[2]) == CONST_INT
8683 && INTVAL (otherops[2]) > -256
8684 && INTVAL (otherops[2]) < 256)))
8686 otherops[0] = operands[1];
8687 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8688 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8689 return "";
8691 /* Fall through */
8693 default:
8694 otherops[0] = adjust_address (operands[0], SImode, 4);
8695 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8696 output_asm_insn ("str%?\t%1, %0", operands);
8697 output_asm_insn ("str%?\t%1, %0", otherops);
8700 else
8701 /* Constraints should prevent this. */
8702 abort ();
8704 return "";
8708 /* Output an arbitrary MOV reg, #n.
8709 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8710 const char *
8711 output_mov_immediate (rtx *operands)
8713 HOST_WIDE_INT n = INTVAL (operands[1]);
8715 /* Try to use one MOV. */
8716 if (const_ok_for_arm (n))
8717 output_asm_insn ("mov%?\t%0, %1", operands);
8719 /* Try to use one MVN. */
8720 else if (const_ok_for_arm (~n))
8722 operands[1] = GEN_INT (~n);
8723 output_asm_insn ("mvn%?\t%0, %1", operands);
8725 else
8727 int n_ones = 0;
8728 int i;
8730 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8731 for (i = 0; i < 32; i++)
8732 if (n & 1 << i)
8733 n_ones++;
8735 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8736 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8737 else
8738 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8741 return "";
8744 /* Output an ADD r, s, #n where n may be too big for one instruction.
8745 If adding zero to one register, output nothing. */
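/* Worked example (illustrative): for operands r0, r1 and n == -0x1234
this emits

	sub	r0, r1, #564	@ 0x234
	sub	r0, r0, #4096	@ 0x1000

splitting -n into chunks that are each a valid ARM immediate.  */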
8746 const char *
8747 output_add_immediate (rtx *operands)
8749 HOST_WIDE_INT n = INTVAL (operands[2]);
8751 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8753 if (n < 0)
8754 output_multi_immediate (operands,
8755 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8756 -n);
8757 else
8758 output_multi_immediate (operands,
8759 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8760 n);
8763 return "";
8766 /* Output a multiple immediate operation.
8767 OPERANDS is the vector of operands referred to in the output patterns.
8768 INSTR1 is the output pattern to use for the first constant.
8769 INSTR2 is the output pattern to use for subsequent constants.
8770 IMMED_OP is the index of the constant slot in OPERANDS.
8771 N is the constant value. */
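/* Worked example (illustrative): n == 0x1234 has no single-immediate
encoding, so with the MOV/ORR patterns this produces

	mov	r0, #564	@ 0x234: bits 2-9
	orr	r0, r0, #4096	@ 0x1000: bit 12

Each chunk is an 8-bit value at an even bit offset, which is exactly
what the ARM immediate encoding can represent.  */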
8772 static const char *
8773 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8774 int immed_op, HOST_WIDE_INT n)
8776 #if HOST_BITS_PER_WIDE_INT > 32
8777 n &= 0xffffffff;
8778 #endif
8780 if (n == 0)
8782 /* Quick and easy output. */
8783 operands[immed_op] = const0_rtx;
8784 output_asm_insn (instr1, operands);
8786 else
8788 int i;
8789 const char * instr = instr1;
8791 /* Note that n is never zero here (which would give no output). */
8792 for (i = 0; i < 32; i += 2)
8794 if (n & (3 << i))
8796 operands[immed_op] = GEN_INT (n & (255 << i));
8797 output_asm_insn (instr, operands);
8798 instr = instr2;
8799 i += 6;
8804 return "";
8807 /* Return the appropriate ARM instruction for the operation code.
8808 The returned result should not be overwritten. OP is the rtx of the
8809 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8810 was shifted. */
8811 const char *
8812 arithmetic_instr (rtx op, int shift_first_arg)
8814 switch (GET_CODE (op))
8816 case PLUS:
8817 return "add";
8819 case MINUS:
8820 return shift_first_arg ? "rsb" : "sub";
8822 case IOR:
8823 return "orr";
8825 case XOR:
8826 return "eor";
8828 case AND:
8829 return "and";
8831 default:
8832 abort ();
8836 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8837 for the operation code. The returned result should not be overwritten.
8838 OP is the rtx code of the shift.
8839 On exit, *AMOUNTP will be -1 if the shift is by a register; otherwise
8840 it holds the constant shift amount. */
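/* For example (illustrative): (ashift x (const_int 3)) yields "asl"
with *AMOUNTP == 3, and (mult x (const_int 8)) is canonicalized to the
same thing via int_log2; (ashift x (reg r1)) yields "asl" with
*AMOUNTP == -1.  */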
8841 static const char *
8842 shift_op (rtx op, HOST_WIDE_INT *amountp)
8844 const char * mnem;
8845 enum rtx_code code = GET_CODE (op);
8847 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8848 *amountp = -1;
8849 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8850 *amountp = INTVAL (XEXP (op, 1));
8851 else
8852 abort ();
8854 switch (code)
8856 case ASHIFT:
8857 mnem = "asl";
8858 break;
8860 case ASHIFTRT:
8861 mnem = "asr";
8862 break;
8864 case LSHIFTRT:
8865 mnem = "lsr";
8866 break;
8868 case ROTATERT:
8869 mnem = "ror";
8870 break;
8872 case MULT:
8873 /* We never have to worry about the amount being other than a
8874 power of 2, since this case can never be reloaded from a reg. */
8875 if (*amountp != -1)
8876 *amountp = int_log2 (*amountp);
8877 else
8878 abort ();
8879 return "asl";
8881 default:
8882 abort ();
8885 if (*amountp != -1)
8887 /* This is not 100% correct, but follows from the desire to merge
8888 multiplication by a power of 2 with the recognizer for a
8889 shift. >=32 is not a valid shift for "asl", so we must try and
8890 output a shift that produces the correct arithmetical result.
8891 Using lsr #32 is identical except for the fact that the carry bit
8892 is not set correctly if we set the flags; but we never use the
8893 carry bit from such an operation, so we can ignore that. */
8894 if (code == ROTATERT)
8895 /* Rotate is just modulo 32. */
8896 *amountp &= 31;
8897 else if (*amountp != (*amountp & 31))
8899 if (code == ASHIFT)
8900 mnem = "lsr";
8901 *amountp = 32;
8904 /* Shifts of 0 are no-ops. */
8905 if (*amountp == 0)
8906 return NULL;
8909 return mnem;
8912 /* Obtain the shift count for the given power of two. */
8914 static HOST_WIDE_INT
8915 int_log2 (HOST_WIDE_INT power)
8917 HOST_WIDE_INT shift = 0;
8919 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8921 if (shift > 31)
8922 abort ();
8923 shift++;
8926 return shift;
8929 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8930 /bin/as is horribly restrictive. */
8931 #define MAX_ASCII_LEN 51
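/* For example (illustrative): the string: say "hi" - is emitted as

	.ascii	"say \"hi\""

and anything longer than MAX_ASCII_LEN is split across several
.ascii directives.  */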
8933 void
8934 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8936 int i;
8937 int len_so_far = 0;
8939 fputs ("\t.ascii\t\"", stream);
8941 for (i = 0; i < len; i++)
8943 int c = p[i];
8945 if (len_so_far >= MAX_ASCII_LEN)
8947 fputs ("\"\n\t.ascii\t\"", stream);
8948 len_so_far = 0;
8951 switch (c)
8953 case TARGET_TAB:
8954 fputs ("\\t", stream);
8955 len_so_far += 2;
8956 break;
8958 case TARGET_FF:
8959 fputs ("\\f", stream);
8960 len_so_far += 2;
8961 break;
8963 case TARGET_BS:
8964 fputs ("\\b", stream);
8965 len_so_far += 2;
8966 break;
8968 case TARGET_CR:
8969 fputs ("\\r", stream);
8970 len_so_far += 2;
8971 break;
8973 case TARGET_NEWLINE:
8974 fputs ("\\n", stream);
8975 c = p [i + 1];
8976 if ((c >= ' ' && c <= '~')
8977 || c == TARGET_TAB)
8978 /* This is a good place for a line break. */
8979 len_so_far = MAX_ASCII_LEN;
8980 else
8981 len_so_far += 2;
8982 break;
8984 case '\"':
8985 case '\\':
8986 putc ('\\', stream);
8987 len_so_far++;
8988 /* Drop through. */
8990 default:
8991 if (c >= ' ' && c <= '~')
8993 putc (c, stream);
8994 len_so_far++;
8996 else
8998 fprintf (stream, "\\%03o", c);
8999 len_so_far += 4;
9001 break;
9005 fputs ("\"\n", stream);
9008 /* Compute the register save mask for registers 0 through 12
9009 inclusive. This code is used by arm_compute_save_reg_mask. */
9010 static unsigned long
9011 arm_compute_save_reg0_reg12_mask (void)
9013 unsigned long func_type = arm_current_func_type ();
9014 unsigned int save_reg_mask = 0;
9015 unsigned int reg;
9017 if (IS_INTERRUPT (func_type))
9019 unsigned int max_reg;
9020 /* Interrupt functions must not corrupt any registers,
9021 even call clobbered ones. If this is a leaf function
9022 we can just examine the registers used by the RTL, but
9023 otherwise we have to assume that whatever function is
9024 called might clobber anything, and so we have to save
9025 all the call-clobbered registers as well. */
9026 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9027 /* FIQ handlers have registers r8 - r12 banked, so
9028 we only need to check r0 - r7. Normal ISRs only
9029 bank r13 and r14, so we must check up to r12.
9030 r13 is the stack pointer which is always preserved,
9031 so we do not need to consider it here. */
9032 max_reg = 7;
9033 else
9034 max_reg = 12;
9036 for (reg = 0; reg <= max_reg; reg++)
9037 if (regs_ever_live[reg]
9038 || (! current_function_is_leaf && call_used_regs [reg]))
9039 save_reg_mask |= (1 << reg);
9041 else
9043 /* In the normal case we only need to save those registers
9044 which are call saved and which are used by this function. */
9045 for (reg = 0; reg <= 10; reg++)
9046 if (regs_ever_live[reg] && ! call_used_regs [reg])
9047 save_reg_mask |= (1 << reg);
9049 /* Handle the frame pointer as a special case. */
9050 if (! TARGET_APCS_FRAME
9051 && ! frame_pointer_needed
9052 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9053 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9054 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9056 /* If we aren't loading the PIC register,
9057 don't stack it even though it may be live. */
9058 if (flag_pic
9059 && ! TARGET_SINGLE_PIC_BASE
9060 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
9061 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9064 /* Save registers so the exception handler can modify them. */
9065 if (current_function_calls_eh_return)
9067 unsigned int i;
9069 for (i = 0; ; i++)
9071 reg = EH_RETURN_DATA_REGNO (i);
9072 if (reg == INVALID_REGNUM)
9073 break;
9074 save_reg_mask |= 1 << reg;
9078 return save_reg_mask;
9081 /* Compute a bit mask of which registers need to be
9082 saved on the stack for the current function. */
9084 static unsigned long
9085 arm_compute_save_reg_mask (void)
9087 unsigned int save_reg_mask = 0;
9088 unsigned long func_type = arm_current_func_type ();
9090 if (IS_NAKED (func_type))
9091 /* This should never really happen. */
9092 return 0;
9094 /* If we are creating a stack frame, then we must save the frame pointer,
9095 IP (which will hold the old stack pointer), LR and the PC. */
9096 if (frame_pointer_needed)
9097 save_reg_mask |=
9098 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9099 | (1 << IP_REGNUM)
9100 | (1 << LR_REGNUM)
9101 | (1 << PC_REGNUM);
9103 /* Volatile functions do not return, so there
9104 is no need to save any other registers. */
9105 if (IS_VOLATILE (func_type))
9106 return save_reg_mask;
9108 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9110 /* Decide if we need to save the link register.
9111 Interrupt routines have their own banked link register,
9112 so they never need to save it.
9113 Otherwise if we do not use the link register we do not need to save
9114 it. If we are pushing other registers onto the stack however, we
9115 can save an instruction in the epilogue by pushing the link register
9116 now and then popping it back into the PC. This incurs extra memory
9117 accesses though, so we only do it when optimizing for size, and only
9118 if we know that we will not need a fancy return sequence. */
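/* For example (illustrative): at -Os a function that already saves
r4 gets

	stmfd	sp!, {r4, lr}	@ prologue
	...
	ldmfd	sp!, {r4, pc}	@ pop and return in one insn

rather than popping into LR and then branching back through it.  */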
9119 if (regs_ever_live [LR_REGNUM]
9120 || (save_reg_mask
9121 && optimize_size
9122 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9123 && !current_function_calls_eh_return))
9124 save_reg_mask |= 1 << LR_REGNUM;
9126 if (cfun->machine->lr_save_eliminated)
9127 save_reg_mask &= ~ (1 << LR_REGNUM);
9129 if (TARGET_REALLY_IWMMXT
9130 && ((bit_count (save_reg_mask)
9131 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9133 unsigned int reg;
9135 /* The total number of registers that are going to be pushed
9136 onto the stack is odd. We need to ensure that the stack
9137 is 64-bit aligned before we start to save iWMMXt registers,
9138 and also before we start to create locals. (A local variable
9139 might be a double or long long which we will load/store using
9140 an iWMMXt instruction). Therefore we need to push another
9141 ARM register, so that the stack will be 64-bit aligned. We
9142 try to avoid using the arg registers (r0 - r3) as they might be
9143 used to pass values in a tail call. */
9144 for (reg = 4; reg <= 12; reg++)
9145 if ((save_reg_mask & (1 << reg)) == 0)
9146 break;
9148 if (reg <= 12)
9149 save_reg_mask |= (1 << reg);
9150 else
9152 cfun->machine->sibcall_blocked = 1;
9153 save_reg_mask |= (1 << 3);
9157 return save_reg_mask;
9161 /* Return the number of bytes required to save VFP registers. */
9162 static int
9163 arm_get_vfp_saved_size (void)
9165 unsigned int regno;
9166 int count;
9167 int saved;
9169 saved = 0;
9170 /* Space for saved VFP registers. */
9171 if (TARGET_HARD_FLOAT && TARGET_VFP)
9173 count = 0;
9174 for (regno = FIRST_VFP_REGNUM;
9175 regno < LAST_VFP_REGNUM;
9176 regno += 2)
9178 if ((!regs_ever_live[regno] || call_used_regs[regno])
9179 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9181 if (count > 0)
9183 /* Work around the ARM10 VFPr1 bug. */
9184 if (count == 2 && !arm_arch6)
9185 count++;
9186 saved += count * 8 + 4;
9188 count = 0;
9190 else
9191 count++;
9193 if (count > 0)
9195 if (count == 2 && !arm_arch6)
9196 count++;
9197 saved += count * 8 + 4;
9200 return saved;
9204 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9205 everything bar the final return instruction. */
9206 const char *
9207 output_return_instruction (rtx operand, int really_return, int reverse)
9209 char conditional[10];
9210 char instr[100];
9211 int reg;
9212 unsigned long live_regs_mask;
9213 unsigned long func_type;
9214 arm_stack_offsets *offsets;
9216 func_type = arm_current_func_type ();
9218 if (IS_NAKED (func_type))
9219 return "";
9221 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9223 /* If this function was declared non-returning, and we have
9224 found a tail call, then we have to trust that the called
9225 function won't return. */
9226 if (really_return)
9228 rtx ops[2];
9230 /* Otherwise, trap an attempted return by aborting. */
9231 ops[0] = operand;
9232 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9233 : "abort");
9234 assemble_external_libcall (ops[1]);
9235 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9238 return "";
9241 if (current_function_calls_alloca && !really_return)
9242 abort ();
9244 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9246 return_used_this_function = 1;
9248 live_regs_mask = arm_compute_save_reg_mask ();
9250 if (live_regs_mask)
9252 const char * return_reg;
9254 /* If we do not have any special requirements for function exit
9255 (eg interworking, or ISR) then we can load the return address
9256 directly into the PC. Otherwise we must load it into LR. */
9257 if (really_return
9258 && ! TARGET_INTERWORK)
9259 return_reg = reg_names[PC_REGNUM];
9260 else
9261 return_reg = reg_names[LR_REGNUM];
9263 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9265 /* There are three possible reasons for the IP register
9266 being saved. 1) a stack frame was created, in which case
9267 IP contains the old stack pointer, or 2) an ISR routine
9268 corrupted it, or 3) it was saved to align the stack on
9269 iWMMXt. In case 1, restore IP into SP, otherwise just
9270 restore IP. */
9271 if (frame_pointer_needed)
9273 live_regs_mask &= ~ (1 << IP_REGNUM);
9274 live_regs_mask |= (1 << SP_REGNUM);
9276 else
9278 if (! IS_INTERRUPT (func_type)
9279 && ! TARGET_REALLY_IWMMXT)
9280 abort ();
9284 /* On some ARM architectures it is faster to use LDR rather than
9285 LDM to load a single register. On other architectures, the
9286 cost is the same. In 26 bit mode, or for exception handlers,
9287 we have to use LDM to load the PC so that the CPSR is also
9288 restored. */
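/* E.g. (illustrative) a lone saved LR is reloaded with
"ldr lr, [sp], #4" rather than "ldmfd sp!, {lr}", while an interrupt
handler still uses "ldmfd sp!, {..., pc}^" so that the "^" suffix
also restores the CPSR from the SPSR.  */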
9289 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9291 if (live_regs_mask == (unsigned int)(1 << reg))
9292 break;
9294 if (reg <= LAST_ARM_REGNUM
9295 && (reg != LR_REGNUM
9296 || ! really_return
9297 || ! IS_INTERRUPT (func_type)))
9299 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9300 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9302 else
9304 char *p;
9305 int first = 1;
9307 /* Generate the load multiple instruction to restore the
9308 registers. Note we can get here, even if
9309 frame_pointer_needed is true, but only if sp already
9310 points to the base of the saved core registers. */
9311 if (live_regs_mask & (1 << SP_REGNUM))
9313 unsigned HOST_WIDE_INT stack_adjust;
9315 offsets = arm_get_frame_offsets ();
9316 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9317 if (stack_adjust != 0 && stack_adjust != 4)
9318 abort ();
9320 if (stack_adjust && arm_arch5)
9321 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9322 else
9324 /* If we can't use ldmib (SA110 bug), then try to pop r3
9325 instead. */
9326 if (stack_adjust)
9327 live_regs_mask |= 1 << 3;
9328 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9331 else
9332 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9334 p = instr + strlen (instr);
9336 for (reg = 0; reg <= SP_REGNUM; reg++)
9337 if (live_regs_mask & (1 << reg))
9339 int l = strlen (reg_names[reg]);
9341 if (first)
9342 first = 0;
9343 else
9345 memcpy (p, ", ", 2);
9346 p += 2;
9349 memcpy (p, "%|", 2);
9350 memcpy (p + 2, reg_names[reg], l);
9351 p += l + 2;
9354 if (live_regs_mask & (1 << LR_REGNUM))
9356 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9357 /* If returning from an interrupt, restore the CPSR. */
9358 if (IS_INTERRUPT (func_type))
9359 strcat (p, "^");
9361 else
9362 strcpy (p, "}");
9365 output_asm_insn (instr, & operand);
9367 /* See if we need to generate an extra instruction to
9368 perform the actual function return. */
9369 if (really_return
9370 && func_type != ARM_FT_INTERWORKED
9371 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9373 /* The return has already been handled
9374 by loading the LR into the PC. */
9375 really_return = 0;
9379 if (really_return)
9381 switch ((int) ARM_FUNC_TYPE (func_type))
9383 case ARM_FT_ISR:
9384 case ARM_FT_FIQ:
9385 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9386 break;
9388 case ARM_FT_INTERWORKED:
9389 sprintf (instr, "bx%s\t%%|lr", conditional);
9390 break;
9392 case ARM_FT_EXCEPTION:
9393 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9394 break;
9396 default:
9397 /* Use bx if it's available. */
9398 if (arm_arch5 || arm_arch4t)
9399 sprintf (instr, "bx%s\t%%|lr", conditional);
9400 else
9401 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9402 break;
9405 output_asm_insn (instr, & operand);
9408 return "";
9411 /* Write the function name into the code section, directly preceding
9412 the function prologue.
9414 Code will be output similar to this:
9415 t0
9416 .ascii "arm_poke_function_name", 0
9417 .align
9418 t1
9419 .word 0xff000000 + (t1 - t0)
9420 arm_poke_function_name
9421 mov ip, sp
9422 stmfd sp!, {fp, ip, lr, pc}
9423 sub fp, ip, #4
9425 When performing a stack backtrace, code can inspect the value
9426 of 'pc' stored at 'fp' + 0. If the trace function then looks
9427 at location pc - 12 and the top 8 bits are set, then we know
9428 that there is a function name embedded immediately preceding this
9429 location, whose length is (pc[-3] & 0x00ffffff).
9431 We assume that pc is declared as a pointer to an unsigned long.
9433 It is of no benefit to output the function name if we are assembling
9434 a leaf function. These function types will not contain a stack
9435 backtrace structure, therefore it is not possible to determine the
9436 function name. */
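/* A hypothetical backtracer fragment (illustrative only; "pc" and
"name" are assumed locals, with pc an unsigned long pointer as
described above):

     if ((pc[-3] & 0xff000000) == 0xff000000)
       name = (const char *) pc - 12 - (pc[-3] & 0x00ffffff);
*/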
9437 void
9438 arm_poke_function_name (FILE *stream, const char *name)
9440 unsigned long alignlength;
9441 unsigned long length;
9442 rtx x;
9444 length = strlen (name) + 1;
9445 alignlength = ROUND_UP_WORD (length);
9447 ASM_OUTPUT_ASCII (stream, name, length);
9448 ASM_OUTPUT_ALIGN (stream, 2);
9449 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9450 assemble_aligned_integer (UNITS_PER_WORD, x);
9453 /* Place some comments into the assembler stream
9454 describing the current function. */
9455 static void
9456 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9458 unsigned long func_type;
9460 if (!TARGET_ARM)
9462 thumb_output_function_prologue (f, frame_size);
9463 return;
9466 /* Sanity check. */
9467 if (arm_ccfsm_state || arm_target_insn)
9468 abort ();
9470 func_type = arm_current_func_type ();
9472 switch ((int) ARM_FUNC_TYPE (func_type))
9474 default:
9475 case ARM_FT_NORMAL:
9476 break;
9477 case ARM_FT_INTERWORKED:
9478 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9479 break;
9480 case ARM_FT_ISR:
9481 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9482 break;
9483 case ARM_FT_FIQ:
9484 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9485 break;
9486 case ARM_FT_EXCEPTION:
9487 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9488 break;
9491 if (IS_NAKED (func_type))
9492 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9494 if (IS_VOLATILE (func_type))
9495 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9497 if (IS_NESTED (func_type))
9498 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9500 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9501 current_function_args_size,
9502 current_function_pretend_args_size, frame_size);
9504 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9505 frame_pointer_needed,
9506 cfun->machine->uses_anonymous_args);
9508 if (cfun->machine->lr_save_eliminated)
9509 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9511 if (current_function_calls_eh_return)
9512 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9514 #ifdef AOF_ASSEMBLER
9515 if (flag_pic)
9516 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9517 #endif
9519 return_used_this_function = 0;
9522 const char *
9523 arm_output_epilogue (rtx sibling)
9525 int reg;
9526 unsigned long saved_regs_mask;
9527 unsigned long func_type;
9528 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9529 frame that is $fp + 4 for a non-variadic function. */
9530 int floats_offset = 0;
9531 rtx operands[3];
9532 FILE * f = asm_out_file;
9533 unsigned int lrm_count = 0;
9534 int really_return = (sibling == NULL);
9535 int start_reg;
9536 arm_stack_offsets *offsets;
9538 /* If we have already generated the return instruction
9539 then it is futile to generate anything else. */
9540 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9541 return "";
9543 func_type = arm_current_func_type ();
9545 if (IS_NAKED (func_type))
9546 /* Naked functions don't have epilogues. */
9547 return "";
9549 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9551 rtx op;
9553 /* A volatile function should never return. Call abort. */
9554 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9555 assemble_external_libcall (op);
9556 output_asm_insn ("bl\t%a0", &op);
9558 return "";
9561 if (current_function_calls_eh_return
9562 && ! really_return)
9563 /* If we are throwing an exception, then we really must
9564 be doing a return, so we can't tail-call. */
9565 abort ();
9567 offsets = arm_get_frame_offsets ();
9568 saved_regs_mask = arm_compute_save_reg_mask ();
9570 if (TARGET_IWMMXT)
9571 lrm_count = bit_count (saved_regs_mask);
9573 floats_offset = offsets->saved_args;
9574 /* Compute how far away the floats will be. */
9575 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9576 if (saved_regs_mask & (1 << reg))
9577 floats_offset += 4;
9579 if (frame_pointer_needed)
9581 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9582 int vfp_offset = offsets->frame;
9584 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9586 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9587 if (regs_ever_live[reg] && !call_used_regs[reg])
9589 floats_offset += 12;
9590 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9591 reg, FP_REGNUM, floats_offset - vfp_offset);
9594 else
9596 start_reg = LAST_FPA_REGNUM;
9598 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9600 if (regs_ever_live[reg] && !call_used_regs[reg])
9602 floats_offset += 12;
9604 /* We can't unstack more than four registers at once. */
9605 if (start_reg - reg == 3)
9607 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9608 reg, FP_REGNUM, floats_offset - vfp_offset);
9609 start_reg = reg - 1;
9612 else
9614 if (reg != start_reg)
9615 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9616 reg + 1, start_reg - reg,
9617 FP_REGNUM, floats_offset - vfp_offset);
9618 start_reg = reg - 1;
9622 /* Just in case the last register checked also needs unstacking. */
9623 if (reg != start_reg)
9624 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9625 reg + 1, start_reg - reg,
9626 FP_REGNUM, floats_offset - vfp_offset);
9629 if (TARGET_HARD_FLOAT && TARGET_VFP)
9631 int saved_size;
9633 /* The fldmx insn does not have base+offset addressing modes,
9634 so we use IP to hold the address. */
9635 saved_size = arm_get_vfp_saved_size ();
9637 if (saved_size > 0)
9639 floats_offset += saved_size;
9640 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9641 FP_REGNUM, floats_offset - vfp_offset);
9643 start_reg = FIRST_VFP_REGNUM;
9644 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9646 if ((!regs_ever_live[reg] || call_used_regs[reg])
9647 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9649 if (start_reg != reg)
9650 arm_output_fldmx (f, IP_REGNUM,
9651 (start_reg - FIRST_VFP_REGNUM) / 2,
9652 (reg - start_reg) / 2);
9653 start_reg = reg + 2;
9656 if (start_reg != reg)
9657 arm_output_fldmx (f, IP_REGNUM,
9658 (start_reg - FIRST_VFP_REGNUM) / 2,
9659 (reg - start_reg) / 2);
9662 if (TARGET_IWMMXT)
9664 /* The frame pointer is guaranteed to be non-double-word aligned.
9665 This is because it is set to (old_stack_pointer - 4) and the
9666 old_stack_pointer was double word aligned. Thus the offset to
9667 the iWMMXt registers to be loaded must also be non-double-word
9668 sized, so that the resultant address *is* double-word aligned.
9669 We can ignore floats_offset since that was already included in
9670 the live_regs_mask. */
9671 lrm_count += (lrm_count % 2 ? 2 : 1);
9673 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9674 if (regs_ever_live[reg] && !call_used_regs[reg])
9676 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9677 reg, FP_REGNUM, lrm_count * 4);
9678 lrm_count += 2;
9682 /* saved_regs_mask should contain the IP, which at the time of stack
9683 frame generation actually contains the old stack pointer. So a
9684 quick way to unwind the stack is just to pop the IP register directly
9685 into the stack pointer. */
9686 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9687 abort ();
9688 saved_regs_mask &= ~ (1 << IP_REGNUM);
9689 saved_regs_mask |= (1 << SP_REGNUM);
9691 /* There are two registers left in saved_regs_mask - LR and PC. We
9692 only need to restore the LR register (the return address), but to
9693 save time we can load it directly into the PC, unless we need a
9694 special function exit sequence, or we are not really returning. */
9695 if (really_return
9696 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9697 && !current_function_calls_eh_return)
9698 /* Delete the LR from the register mask, so that the LR on
9699 the stack is loaded into the PC in the register mask. */
9700 saved_regs_mask &= ~ (1 << LR_REGNUM);
9701 else
9702 saved_regs_mask &= ~ (1 << PC_REGNUM);
9704 /* We must use SP as the base register, because SP is one of the
9705 registers being restored. If an interrupt or page fault
9706 happens in the ldm instruction, the SP might or might not
9707 have been restored. That would be bad, as then SP will no
9708 longer indicate the safe area of stack, and we can get stack
9709 corruption. Using SP as the base register means that it will
9710 be reset correctly to the original value, should an interrupt
9711 occur. If the stack pointer already points at the right
9712 place, then omit the subtraction. */
9713 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9714 || current_function_calls_alloca)
9715 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9716 4 * bit_count (saved_regs_mask));
9717 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9719 if (IS_INTERRUPT (func_type))
9720 /* Interrupt handlers will have pushed the
9721 IP onto the stack, so restore it now. */
9722 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9724 else
9726 /* Restore stack pointer if necessary. */
9727 if (offsets->outgoing_args != offsets->saved_regs)
9729 operands[0] = operands[1] = stack_pointer_rtx;
9730 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9731 output_add_immediate (operands);
9734 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9736 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9737 if (regs_ever_live[reg] && !call_used_regs[reg])
9738 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9739 reg, SP_REGNUM);
9741 else
9743 start_reg = FIRST_FPA_REGNUM;
9745 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9747 if (regs_ever_live[reg] && !call_used_regs[reg])
9749 if (reg - start_reg == 3)
9751 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9752 start_reg, SP_REGNUM);
9753 start_reg = reg + 1;
9756 else
9758 if (reg != start_reg)
9759 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9760 start_reg, reg - start_reg,
9761 SP_REGNUM);
9763 start_reg = reg + 1;
9767 /* Just in case the last register checked also needs unstacking. */
9768 if (reg != start_reg)
9769 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9770 start_reg, reg - start_reg, SP_REGNUM);
9773 if (TARGET_HARD_FLOAT && TARGET_VFP)
9775 start_reg = FIRST_VFP_REGNUM;
9776 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9778 if ((!regs_ever_live[reg] || call_used_regs[reg])
9779 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9781 if (start_reg != reg)
9782 arm_output_fldmx (f, SP_REGNUM,
9783 (start_reg - FIRST_VFP_REGNUM) / 2,
9784 (reg - start_reg) / 2);
9785 start_reg = reg + 2;
9788 if (start_reg != reg)
9789 arm_output_fldmx (f, SP_REGNUM,
9790 (start_reg - FIRST_VFP_REGNUM) / 2,
9791 (reg - start_reg) / 2);
9793 if (TARGET_IWMMXT)
9794 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9795 if (regs_ever_live[reg] && !call_used_regs[reg])
9796 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9798 /* If we can, restore the LR into the PC. */
9799 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9800 && really_return
9801 && current_function_pretend_args_size == 0
9802 && saved_regs_mask & (1 << LR_REGNUM)
9803 && !current_function_calls_eh_return)
9805 saved_regs_mask &= ~ (1 << LR_REGNUM);
9806 saved_regs_mask |= (1 << PC_REGNUM);
9809 /* Load the registers off the stack. If we only have one register
9810 to load, use the LDR instruction - it is faster.
9811 if (saved_regs_mask == (1 << LR_REGNUM))
9813 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9815 else if (saved_regs_mask)
9817 if (saved_regs_mask & (1 << SP_REGNUM))
9818 /* Note - write back to the stack register is not enabled
9819 (ie "ldmfd sp!..."). We know that the stack pointer is
9820 in the list of registers and if we add writeback the
9821 instruction becomes UNPREDICTABLE. */
9822 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9823 else
9824 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9827 if (current_function_pretend_args_size)
9829 /* Unwind the pre-pushed regs. */
9830 operands[0] = operands[1] = stack_pointer_rtx;
9831 operands[2] = GEN_INT (current_function_pretend_args_size);
9832 output_add_immediate (operands);
9836 /* We may have already restored PC directly from the stack. */
9837 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9838 return "";
9840 /* Stack adjustment for exception handler. */
9841 if (current_function_calls_eh_return)
9842 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9843 ARM_EH_STACKADJ_REGNUM);
9845 /* Generate the return instruction. */
9846 switch ((int) ARM_FUNC_TYPE (func_type))
9848 case ARM_FT_ISR:
9849 case ARM_FT_FIQ:
9850 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9851 break;
9853 case ARM_FT_EXCEPTION:
9854 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9855 break;
9857 case ARM_FT_INTERWORKED:
9858 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9859 break;
9861 default:
9862 if (arm_arch5 || arm_arch4t)
9863 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9864 else
9865 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9866 break;
9869 return "";
9872 static void
9873 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9874 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9876 arm_stack_offsets *offsets;
9878 if (TARGET_THUMB)
9880 /* ??? Probably not safe to set this here, since it assumes that a
9881 function will be emitted as assembly immediately after we generate
9882 RTL for it. This does not happen for inline functions. */
9883 return_used_this_function = 0;
9885 else
9887 /* We need to take into account any stack-frame rounding. */
9888 offsets = arm_get_frame_offsets ();
9890 if (use_return_insn (FALSE, NULL)
9891 && return_used_this_function
9892 && offsets->saved_regs != offsets->outgoing_args
9893 && !frame_pointer_needed)
9894 abort ();
9896 /* Reset the ARM-specific per-function variables. */
9897 after_arm_reorg = 0;
9901 /* Generate and emit an insn that we will recognize as a push_multi.
9902 Unfortunately, since this insn does not reflect very well the actual
9903 semantics of the operation, we need to annotate the insn for the benefit
9904 of DWARF2 frame unwind information. */
9905 static rtx
9906 emit_multi_reg_push (int mask)
9908 int num_regs = 0;
9909 int num_dwarf_regs;
9910 int i, j;
9911 rtx par;
9912 rtx dwarf;
9913 int dwarf_par_index;
9914 rtx tmp, reg;
9916 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9917 if (mask & (1 << i))
9918 num_regs++;
9920 if (num_regs == 0 || num_regs > 16)
9921 abort ();
9923 /* We don't record the PC in the dwarf frame information. */
9924 num_dwarf_regs = num_regs;
9925 if (mask & (1 << PC_REGNUM))
9926 num_dwarf_regs--;
9928 /* For the body of the insn we are going to generate an UNSPEC in
9929 parallel with several USEs. This allows the insn to be recognized
9930 by the push_multi pattern in the arm.md file. The insn looks
9931 something like this:
9933 (parallel [
9934 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9935 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9936 (use (reg:SI 11 fp))
9937 (use (reg:SI 12 ip))
9938 (use (reg:SI 14 lr))
9939 (use (reg:SI 15 pc))
9942 For the frame note however, we try to be more explicit and actually
9943 show each register being stored into the stack frame, plus a (single)
9944 decrement of the stack pointer. We do it this way in order to be
9945 friendly to the stack unwinding code, which only wants to see a single
9946 stack decrement per instruction. The RTL we generate for the note looks
9947 something like this:
9949 (sequence [
9950 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9951 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9952 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9953 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9954 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9957 This sequence is used both by the code to support stack unwinding for
9958 exceptions handlers and the code to generate dwarf2 frame debugging. */
9960 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9961 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9962 dwarf_par_index = 1;
9964 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9966 if (mask & (1 << i))
9968 reg = gen_rtx_REG (SImode, i);
9970 XVECEXP (par, 0, 0)
9971 = gen_rtx_SET (VOIDmode,
9972 gen_rtx_MEM (BLKmode,
9973 gen_rtx_PRE_DEC (BLKmode,
9974 stack_pointer_rtx)),
9975 gen_rtx_UNSPEC (BLKmode,
9976 gen_rtvec (1, reg),
9977 UNSPEC_PUSH_MULT));
9979 if (i != PC_REGNUM)
9981 tmp = gen_rtx_SET (VOIDmode,
9982 gen_rtx_MEM (SImode, stack_pointer_rtx),
9983 reg);
9984 RTX_FRAME_RELATED_P (tmp) = 1;
9985 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9986 dwarf_par_index++;
9989 break;
9993 for (j = 1, i++; j < num_regs; i++)
9995 if (mask & (1 << i))
9997 reg = gen_rtx_REG (SImode, i);
9999 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10001 if (i != PC_REGNUM)
10003 tmp = gen_rtx_SET (VOIDmode,
10004 gen_rtx_MEM (SImode,
10005 plus_constant (stack_pointer_rtx,
10006 4 * j)),
10007 reg);
10008 RTX_FRAME_RELATED_P (tmp) = 1;
10009 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10012 j++;
10016 par = emit_insn (par);
10018 tmp = gen_rtx_SET (SImode,
10019 stack_pointer_rtx,
10020 gen_rtx_PLUS (SImode,
10021 stack_pointer_rtx,
10022 GEN_INT (-4 * num_regs)));
10023 RTX_FRAME_RELATED_P (tmp) = 1;
10024 XVECEXP (dwarf, 0, 0) = tmp;
10026 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10027 REG_NOTES (par));
10028 return par;
10031 static rtx
10032 emit_sfm (int base_reg, int count)
10034 rtx par;
10035 rtx dwarf;
10036 rtx tmp, reg;
10037 int i;
10039 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10040 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10042 reg = gen_rtx_REG (XFmode, base_reg++);
10044 XVECEXP (par, 0, 0)
10045 = gen_rtx_SET (VOIDmode,
10046 gen_rtx_MEM (BLKmode,
10047 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
10048 gen_rtx_UNSPEC (BLKmode,
10049 gen_rtvec (1, reg),
10050 UNSPEC_PUSH_MULT));
10051 tmp = gen_rtx_SET (VOIDmode,
10052 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
10053 RTX_FRAME_RELATED_P (tmp) = 1;
10054 XVECEXP (dwarf, 0, 1) = tmp;
10056 for (i = 1; i < count; i++)
10058 reg = gen_rtx_REG (XFmode, base_reg++);
10059 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10061 tmp = gen_rtx_SET (VOIDmode,
10062 gen_rtx_MEM (XFmode,
10063 plus_constant (stack_pointer_rtx,
10064 i * 12)),
10065 reg);
10066 RTX_FRAME_RELATED_P (tmp) = 1;
10067 XVECEXP (dwarf, 0, i + 1) = tmp;
10070 tmp = gen_rtx_SET (VOIDmode,
10071 stack_pointer_rtx,
10072 gen_rtx_PLUS (SImode,
10073 stack_pointer_rtx,
10074 GEN_INT (-12 * count)));
10075 RTX_FRAME_RELATED_P (tmp) = 1;
10076 XVECEXP (dwarf, 0, 0) = tmp;
10078 par = emit_insn (par);
10079 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10080 REG_NOTES (par));
10081 return par;
10085 /* Return true if the current function needs to save/restore LR. */
10087 static bool
10088 thumb_force_lr_save (void)
10090 return !cfun->machine->lr_save_eliminated
10091 && (!leaf_function_p ()
10092 || thumb_far_jump_used_p ()
10093 || regs_ever_live [LR_REGNUM]);
10097 /* Compute the distance from register FROM to register TO.
10098 These can be the arg pointer (26), the soft frame pointer (25),
10099 the stack pointer (13) or the hard frame pointer (11).
10100 In thumb mode r7 is used as the soft frame pointer, if needed.
10101 Typical stack layout looks like this:
10103 old stack pointer -> | |
10104 ----
10105 | | \
10106 | | saved arguments for
10107 | | vararg functions
10108 | | /
10109 --
10110 hard FP & arg pointer -> | | \
10111 | | stack
10112 | | frame
10113 | | /
10114 --
10115 | | \
10116 | | call saved
10117 | | registers
10118 soft frame pointer -> | | /
10119 --
10120 | | \
10121 | | local
10122 | | variables
10123 | | /
10124 --
10125 | | \
10126 | | outgoing
10127 | | arguments
10128 current stack pointer -> | | /
10129 --
10131 For a given function some or all of these stack components
10132 may not be needed, giving rise to the possibility of
10133 eliminating some of the registers.
10135 The values returned by this function must reflect the behavior
10136 of arm_expand_prologue() and arm_compute_save_reg_mask().
10138 The sign of the number returned reflects the direction of stack
10139 growth, so the values are positive for all eliminations except
10140 from the soft frame pointer to the hard frame pointer.
10142 SFP may point just inside the local variables block to ensure correct
10143 alignment. */
10146 /* Calculate stack offsets. These are used to calculate register elimination
10147 offsets and in prologue/epilogue code. */
10149 static arm_stack_offsets *
10150 arm_get_frame_offsets (void)
10152 struct arm_stack_offsets *offsets;
10153 unsigned long func_type;
10154 int leaf;
10155 int saved;
10156 HOST_WIDE_INT frame_size;
10158 offsets = &cfun->machine->stack_offsets;
10160 /* We need to know if we are a leaf function. Unfortunately, it
10161 is possible to be called after start_sequence has been called,
10162 which causes get_insns to return the insns for the sequence,
10163 not the function, which will cause leaf_function_p to return
10164 the incorrect result.
10166 To work around this, we cache the offsets: they are only needed
10167 once reload has completed, and the frame size cannot be changed
10168 after that time, so we can safely use the cached value. */
10170 if (reload_completed)
10171 return offsets;
10173 /* Initially this is the size of the local variables. It will be translated
10174 into an offset once we have determined the size of preceding data. */
10175 frame_size = ROUND_UP_WORD (get_frame_size ());
10177 leaf = leaf_function_p ();
10179 /* Space for variadic functions. */
10180 offsets->saved_args = current_function_pretend_args_size;
10182 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10184 if (TARGET_ARM)
10186 unsigned int regno;
10188 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10190 /* We know that SP will be doubleword aligned on entry, and we must
10191 preserve that condition at any subroutine call. We also require the
10192 soft frame pointer to be doubleword aligned. */
10194 if (TARGET_REALLY_IWMMXT)
10196 /* Check for the call-saved iWMMXt registers. */
10197 for (regno = FIRST_IWMMXT_REGNUM;
10198 regno <= LAST_IWMMXT_REGNUM;
10199 regno++)
10200 if (regs_ever_live [regno] && ! call_used_regs [regno])
10201 saved += 8;
10204 func_type = arm_current_func_type ();
10205 if (! IS_VOLATILE (func_type))
10207 /* Space for saved FPA registers. */
10208 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10209 if (regs_ever_live[regno] && ! call_used_regs[regno])
10210 saved += 12;
10212 /* Space for saved VFP registers. */
10213 if (TARGET_HARD_FLOAT && TARGET_VFP)
10214 saved += arm_get_vfp_saved_size ();
10217 else /* TARGET_THUMB */
10219 int reg;
10220 int count_regs;
10222 saved = 0;
10223 count_regs = 0;
10224 for (reg = 8; reg < 13; reg ++)
10225 if (THUMB_REG_PUSHED_P (reg))
10226 count_regs ++;
10227 if (count_regs)
10228 saved += 4 * count_regs;
10229 count_regs = 0;
10230 for (reg = 0; reg <= LAST_LO_REGNUM; reg ++)
10231 if (THUMB_REG_PUSHED_P (reg))
10232 count_regs ++;
10233 if (count_regs || thumb_force_lr_save ())
10234 saved += 4 * (count_regs + 1);
10235 if (TARGET_BACKTRACE)
10237 if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0))
10238 saved += 20;
10239 else
10240 saved += 16;
10244 /* Saved registers include the stack frame. */
10245 offsets->saved_regs = offsets->saved_args + saved;
10246 offsets->soft_frame = offsets->saved_regs;
10247 /* A leaf function does not need any stack alignment if it has nothing
10248 on the stack. */
10249 if (leaf && frame_size == 0)
10251 offsets->outgoing_args = offsets->soft_frame;
10252 return offsets;
10255 /* Ensure SFP has the correct alignment. */
10256 if (ARM_DOUBLEWORD_ALIGN
10257 && (offsets->soft_frame & 7))
10258 offsets->soft_frame += 4;
10260 offsets->outgoing_args = offsets->soft_frame + frame_size
10261 + current_function_outgoing_args_size;
10263 if (ARM_DOUBLEWORD_ALIGN)
10265 /* Ensure SP remains doubleword aligned. */
10266 if (offsets->outgoing_args & 7)
10267 offsets->outgoing_args += 4;
10268 if (offsets->outgoing_args & 7)
10269 abort ();
10272 return offsets;
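/* Alignment sketch (illustrative numbers): with ARM_DOUBLEWORD_ALIGN,
   a soft_frame of 20 is bumped to 24 and an outgoing_args of 36 to 40,
   keeping both doubleword (8 byte) aligned.  Every component is a
   multiple of 4, so the misalignment is only ever 0 or 4 and a single
   +4 suffices; the abort above guards that invariant.  */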
10276 /* Calculate the relative offsets for the different stack pointers. Positive
10277 offsets are in the direction of stack growth. */
10279 HOST_WIDE_INT
10280 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10282 arm_stack_offsets *offsets;
10284 offsets = arm_get_frame_offsets ();
10286 /* OK, now we have enough information to compute the distances.
10287 There must be an entry in these switch tables for each pair
10288 of registers in ELIMINABLE_REGS, even if some of the entries
10289 seem to be redundant or useless. */
10290 switch (from)
10292 case ARG_POINTER_REGNUM:
10293 switch (to)
10295 case THUMB_HARD_FRAME_POINTER_REGNUM:
10296 return 0;
10298 case FRAME_POINTER_REGNUM:
10299 /* This is the reverse of the soft frame pointer
10300 to hard frame pointer elimination below. */
10301 return offsets->soft_frame - offsets->saved_args;
10303 case ARM_HARD_FRAME_POINTER_REGNUM:
10304 /* If there is no stack frame then the hard
10305 frame pointer and the arg pointer coincide. */
10306 if (offsets->frame == offsets->saved_regs)
10307 return 0;
10308 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10309 return (frame_pointer_needed
10310 && cfun->static_chain_decl != NULL
10311 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10313 case STACK_POINTER_REGNUM:
10314 /* If nothing has been pushed on the stack at all
10315 then this will return -4. This *is* correct! */
10316 return offsets->outgoing_args - (offsets->saved_args + 4);
10318 default:
10319 abort ();
10321 break;
10323 case FRAME_POINTER_REGNUM:
10324 switch (to)
10326 case THUMB_HARD_FRAME_POINTER_REGNUM:
10327 return 0;
10329 case ARM_HARD_FRAME_POINTER_REGNUM:
10330 /* The hard frame pointer points to the top entry in the
10331 stack frame. The soft frame pointer to the bottom entry
10332 in the stack frame. If there is no stack frame at all,
10333 then they are identical. */
10335 return offsets->frame - offsets->soft_frame;
10337 case STACK_POINTER_REGNUM:
10338 return offsets->outgoing_args - offsets->soft_frame;
10340 default:
10341 abort ();
10343 break;
10345 default:
10346 /* You cannot eliminate from the stack pointer.
10347 In theory you could eliminate from the hard frame
10348 pointer to the stack pointer, but this will never
10349 happen, since if a stack frame is not needed the
10350 hard frame pointer will never be used. */
10351 abort ();
10356 /* Generate the prologue instructions for entry into an ARM function. */
10357 void
10358 arm_expand_prologue (void)
10360 int reg;
10361 rtx amount;
10362 rtx insn;
10363 rtx ip_rtx;
10364 unsigned long live_regs_mask;
10365 unsigned long func_type;
10366 int fp_offset = 0;
10367 int saved_pretend_args = 0;
10368 int saved_regs = 0;
10369 unsigned int args_to_push;
10370 arm_stack_offsets *offsets;
10372 func_type = arm_current_func_type ();
10374 /* Naked functions don't have prologues. */
10375 if (IS_NAKED (func_type))
10376 return;
10378 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10379 args_to_push = current_function_pretend_args_size;
10381 /* Compute which registers we will have to save onto the stack. */
10382 live_regs_mask = arm_compute_save_reg_mask ();
10384 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10386 if (frame_pointer_needed)
10388 if (IS_INTERRUPT (func_type))
10390 /* Interrupt functions must not corrupt any registers.
10391 Creating a frame pointer, however, corrupts the IP
10392 register, so we must push it first. */
10393 insn = emit_multi_reg_push (1 << IP_REGNUM);
10395 /* Do not set RTX_FRAME_RELATED_P on this insn.
10396 The dwarf stack unwinding code only wants to see one
10397 stack decrement per function, and this is not it. If
10398 this instruction is labeled as being part of the frame
10399 creation sequence then dwarf2out_frame_debug_expr will
10400 abort when it encounters the assignment of IP to FP
10401 later on, since the use of SP here establishes SP as
10402 the CFA register and not IP.
10404 Anyway this instruction is not really part of the stack
10405 frame creation although it is part of the prologue. */
10407 else if (IS_NESTED (func_type))
10409 /* The static chain register is the same as the IP register, which is
10410 used as a scratch register during stack frame creation.
10411 To get around this we need to find somewhere to store IP
10412 whilst the frame is being created. We try the following
10413 places in order:
10415 1. The last argument register.
10416 2. A slot on the stack above the frame. (This only
10417 works if the function is not a varargs function).
10418 3. Register r3, after pushing the argument registers
10419 onto the stack.
10421 Note - we only need to tell the dwarf2 backend about the SP
10422 adjustment in the second variant; the static chain register
10423 doesn't need to be unwound, as it doesn't contain a value
10424 inherited from the caller. */
10426 if (regs_ever_live[3] == 0)
10428 insn = gen_rtx_REG (SImode, 3);
10429 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10430 insn = emit_insn (insn);
10432 else if (args_to_push == 0)
10434 rtx dwarf;
10435 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10436 insn = gen_rtx_MEM (SImode, insn);
10437 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10438 insn = emit_insn (insn);
10440 fp_offset = 4;
10442 /* Just tell the dwarf backend that we adjusted SP. */
10443 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10444 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10445 GEN_INT (-fp_offset)));
10446 RTX_FRAME_RELATED_P (insn) = 1;
10447 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10448 dwarf, REG_NOTES (insn));
10450 else
10452 /* Store the args on the stack. */
10453 if (cfun->machine->uses_anonymous_args)
10454 insn = emit_multi_reg_push
10455 ((0xf0 >> (args_to_push / 4)) & 0xf);
10456 else
10457 insn = emit_insn
10458 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10459 GEN_INT (- args_to_push)));
10461 RTX_FRAME_RELATED_P (insn) = 1;
10463 saved_pretend_args = 1;
10464 fp_offset = args_to_push;
10465 args_to_push = 0;
10467 /* Now reuse r3 to preserve IP. */
10468 insn = gen_rtx_REG (SImode, 3);
10469 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10470 (void) emit_insn (insn);
10474 if (fp_offset)
10476 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10477 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10479 else
10480 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10482 insn = emit_insn (insn);
10483 RTX_FRAME_RELATED_P (insn) = 1;
10486 if (args_to_push)
10488 /* Push the argument registers, or reserve space for them. */
10489 if (cfun->machine->uses_anonymous_args)
10490 insn = emit_multi_reg_push
10491 ((0xf0 >> (args_to_push / 4)) & 0xf);
10492 else
10493 insn = emit_insn
10494 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10495 GEN_INT (- args_to_push)));
10496 RTX_FRAME_RELATED_P (insn) = 1;
10499 /* If this is an interrupt service routine, and the link register
10500 is going to be pushed, and we are not creating a stack frame,
10501 (which would involve an extra push of IP and a pop in the epilogue)
10502 subtracting four from LR now will mean that the function return
10503 can be done with a single instruction. */
10504 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10505 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10506 && ! frame_pointer_needed)
10507 emit_insn (gen_rtx_SET (SImode,
10508 gen_rtx_REG (SImode, LR_REGNUM),
10509 gen_rtx_PLUS (SImode,
10510 gen_rtx_REG (SImode, LR_REGNUM),
10511 GEN_INT (-4))));
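/* A sketch of why this helps (assumed ISR return sequence): on IRQ/FIQ
   entry LR holds the preferred return address plus 4.  Having done
   "sub lr, lr, #4" in the prologue, the epilogue can return with a
   single "ldmfd sp!, {..., pc}^" instead of needing a separate
   "subs pc, lr, #4".  */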
10513 if (live_regs_mask)
10515 insn = emit_multi_reg_push (live_regs_mask);
10516 saved_regs += bit_count (live_regs_mask) * 4;
10517 RTX_FRAME_RELATED_P (insn) = 1;
10520 if (TARGET_IWMMXT)
10521 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10522 if (regs_ever_live[reg] && ! call_used_regs [reg])
10524 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10525 insn = gen_rtx_MEM (V2SImode, insn);
10526 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10527 gen_rtx_REG (V2SImode, reg)));
10528 RTX_FRAME_RELATED_P (insn) = 1;
10529 saved_regs += 8;
10532 if (! IS_VOLATILE (func_type))
10534 int start_reg;
10536 /* Save any floating point call-saved registers used by this
10537 function. */
10538 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10540 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10541 if (regs_ever_live[reg] && !call_used_regs[reg])
10543 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10544 insn = gen_rtx_MEM (XFmode, insn);
10545 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10546 gen_rtx_REG (XFmode, reg)));
10547 RTX_FRAME_RELATED_P (insn) = 1;
10548 saved_regs += 12;
10551 else
10553 start_reg = LAST_FPA_REGNUM;
10555 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10557 if (regs_ever_live[reg] && !call_used_regs[reg])
10559 if (start_reg - reg == 3)
10561 insn = emit_sfm (reg, 4);
10562 RTX_FRAME_RELATED_P (insn) = 1;
10563 saved_regs += 48;
10564 start_reg = reg - 1;
10567 else
10569 if (start_reg != reg)
10571 insn = emit_sfm (reg + 1, start_reg - reg);
10572 RTX_FRAME_RELATED_P (insn) = 1;
10573 saved_regs += (start_reg - reg) * 12;
10575 start_reg = reg - 1;
10579 if (start_reg != reg)
10581 insn = emit_sfm (reg + 1, start_reg - reg);
10582 saved_regs += (start_reg - reg) * 12;
10583 RTX_FRAME_RELATED_P (insn) = 1;
10586 if (TARGET_HARD_FLOAT && TARGET_VFP)
10588 start_reg = FIRST_VFP_REGNUM;
10590 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10592 if ((!regs_ever_live[reg] || call_used_regs[reg])
10593 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10595 if (start_reg != reg)
10596 saved_regs += vfp_emit_fstmx (start_reg,
10597 (reg - start_reg) / 2);
10598 start_reg = reg + 2;
10601 if (start_reg != reg)
10602 saved_regs += vfp_emit_fstmx (start_reg,
10603 (reg - start_reg) / 2);
10607 if (frame_pointer_needed)
10609 /* Create the new frame pointer. */
10610 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10611 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10612 RTX_FRAME_RELATED_P (insn) = 1;
10614 if (IS_NESTED (func_type))
10616 /* Recover the static chain register. */
10617 if (regs_ever_live [3] == 0
10618 || saved_pretend_args)
10619 insn = gen_rtx_REG (SImode, 3);
10620 else /* if (current_function_pretend_args_size == 0) */
10622 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10623 GEN_INT (4));
10624 insn = gen_rtx_MEM (SImode, insn);
10627 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10628 /* Add a USE to stop propagate_one_insn() from barfing. */
10629 emit_insn (gen_prologue_use (ip_rtx));
10633 offsets = arm_get_frame_offsets ();
10634 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10636 /* This add can produce multiple insns for a large constant, so we
10637 need to get tricky. */
10638 rtx last = get_last_insn ();
10640 amount = GEN_INT (offsets->saved_args + saved_regs
10641 - offsets->outgoing_args);
10643 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10644 amount));
10647 last = last ? NEXT_INSN (last) : get_insns ();
10648 RTX_FRAME_RELATED_P (last) = 1;
10650 while (last != insn);
10652 /* If the frame pointer is needed, emit a special barrier that
10653 will prevent the scheduler from moving stores to the frame
10654 before the stack adjustment. */
10655 if (frame_pointer_needed)
10656 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10657 hard_frame_pointer_rtx));
10660 /* If we are profiling, make sure no instructions are scheduled before
10661 the call to mcount. Similarly if the user has requested no
10662 scheduling in the prolog. */
10663 if (current_function_profile || TARGET_NO_SCHED_PRO)
10664 emit_insn (gen_blockage ());
10666 /* If the link register is being kept alive, with the return address in it,
10667 then make sure that it does not get reused by the ce2 pass. */
10668 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10670 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10671 cfun->machine->lr_save_eliminated = 1;
10675 /* If CODE is 'd', then X is a condition operand and the instruction
10676 should only be executed if the condition is true.
10677 If CODE is 'D', then X is a condition operand and the instruction
10678 should only be executed if the condition is false: however, if the mode
10679 of the comparison is CCFPEmode, then always execute the instruction -- we
10680 do this because in these circumstances !GE does not necessarily imply LT;
10681 in these cases the instruction pattern will take care to make sure that
10682 an instruction containing %d will follow, thereby undoing the effects of
10683 doing this instruction unconditionally.
10684 If CODE is 'N' then X is a floating point operand that must be negated
10685 before output.
10686 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10687 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
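/* Illustrative uses (the real templates live in arm.md): an output
   template such as "mov%?\t%0, %1" uses '?' to emit the current
   condition-code suffix, while "mvn%?\t%0, #%B1" prints the bitwise
   inverse of a CONST_INT operand via 'B'.  */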
10688 void
10689 arm_print_operand (FILE *stream, rtx x, int code)
10691 switch (code)
10693 case '@':
10694 fputs (ASM_COMMENT_START, stream);
10695 return;
10697 case '_':
10698 fputs (user_label_prefix, stream);
10699 return;
10701 case '|':
10702 fputs (REGISTER_PREFIX, stream);
10703 return;
10705 case '?':
10706 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10708 if (TARGET_THUMB || current_insn_predicate != NULL)
10709 abort ();
10711 fputs (arm_condition_codes[arm_current_cc], stream);
10713 else if (current_insn_predicate)
10715 enum arm_cond_code code;
10717 if (TARGET_THUMB)
10718 abort ();
10720 code = get_arm_condition_code (current_insn_predicate);
10721 fputs (arm_condition_codes[code], stream);
10723 return;
10725 case 'N':
10727 REAL_VALUE_TYPE r;
10728 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10729 r = REAL_VALUE_NEGATE (r);
10730 fprintf (stream, "%s", fp_const_from_val (&r));
10732 return;
10734 case 'B':
10735 if (GET_CODE (x) == CONST_INT)
10737 HOST_WIDE_INT val;
10738 val = ARM_SIGN_EXTEND (~INTVAL (x));
10739 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10741 else
10743 putc ('~', stream);
10744 output_addr_const (stream, x);
10746 return;
10748 case 'i':
10749 fprintf (stream, "%s", arithmetic_instr (x, 1));
10750 return;
10752 /* Truncate Cirrus shift counts. */
10753 case 's':
10754 if (GET_CODE (x) == CONST_INT)
10756 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10757 return;
10759 arm_print_operand (stream, x, 0);
10760 return;
10762 case 'I':
10763 fprintf (stream, "%s", arithmetic_instr (x, 0));
10764 return;
10766 case 'S':
10768 HOST_WIDE_INT val;
10769 const char * shift = shift_op (x, &val);
10771 if (shift)
10773 fprintf (stream, ", %s ", shift_op (x, &val));
10774 if (val == -1)
10775 arm_print_operand (stream, XEXP (x, 1), 0);
10776 else
10777 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10780 return;
10782 /* An explanation of the 'Q', 'R' and 'H' register operands:
10784 In a pair of registers containing a DI or DF value the 'Q'
10785 operand returns the register number of the register containing
10786 the least significant part of the value. The 'R' operand returns
10787 the register number of the register containing the most
10788 significant part of the value.
10790 The 'H' operand returns the higher of the two register numbers.
10791 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10792 same as the 'Q' operand, since the most significant part of the
10793 value is held in the lower number register. The reverse is true
10794 on systems where WORDS_BIG_ENDIAN is false.
10796 The purpose of these operands is to distinguish between cases
10797 where the endian-ness of the values is important (for example
10798 when they are added together), and cases where the endian-ness
10799 is irrelevant, but the order of register operations is important.
10800 For example when loading a value from memory into a register
10801 pair, the endian-ness does not matter. Provided that the value
10802 from the lower memory address is put into the lower numbered
10803 register, and the value from the higher address is put into the
10804 higher numbered register, the load will work regardless of whether
10805 the value being loaded is big-wordian or little-wordian. The
10806 order of the two register loads can matter however, if the address
10807 of the memory location is actually held in one of the registers
10808 being overwritten by the load. */
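/* A concrete illustration (assuming WORDS_BIG_ENDIAN is false): for a
   DImode value in r0/r1 the least significant word is in r0, so '%Q'
   prints r0 while '%R' and '%H' both print r1.  When WORDS_BIG_ENDIAN
   is true, '%Q' and '%H' coincide instead, as described above.  */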
10809 case 'Q':
10810 if (REGNO (x) > LAST_ARM_REGNUM)
10811 abort ();
10812 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10813 return;
10815 case 'R':
10816 if (REGNO (x) > LAST_ARM_REGNUM)
10817 abort ();
10818 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10819 return;
10821 case 'H':
10822 if (REGNO (x) > LAST_ARM_REGNUM)
10823 abort ();
10824 asm_fprintf (stream, "%r", REGNO (x) + 1);
10825 return;
10827 case 'm':
10828 asm_fprintf (stream, "%r",
10829 GET_CODE (XEXP (x, 0)) == REG
10830 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10831 return;
10833 case 'M':
10834 asm_fprintf (stream, "{%r-%r}",
10835 REGNO (x),
10836 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10837 return;
10839 case 'd':
10840 /* CONST_TRUE_RTX means always -- that's the default. */
10841 if (x == const_true_rtx)
10842 return;
10844 fputs (arm_condition_codes[get_arm_condition_code (x)],
10845 stream);
10846 return;
10848 case 'D':
10849 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10850 want to do that. */
10851 if (x == const_true_rtx)
10852 abort ();
10854 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10855 (get_arm_condition_code (x))],
10856 stream);
10857 return;
10859 /* Cirrus registers can be accessed in a variety of ways:
10860 single floating point (f)
10861 double floating point (d)
10862 32bit integer (fx)
10863 64bit integer (dx). */
10864 case 'W': /* Cirrus register in F mode. */
10865 case 'X': /* Cirrus register in D mode. */
10866 case 'Y': /* Cirrus register in FX mode. */
10867 case 'Z': /* Cirrus register in DX mode. */
10868 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10869 abort ();
10871 fprintf (stream, "mv%s%s",
10872 code == 'W' ? "f"
10873 : code == 'X' ? "d"
10874 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10876 return;
10878 /* Print cirrus register in the mode specified by the register's mode. */
10879 case 'V':
10881 int mode = GET_MODE (x);
10883 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10884 abort ();
10886 fprintf (stream, "mv%s%s",
10887 mode == DFmode ? "d"
10888 : mode == SImode ? "fx"
10889 : mode == DImode ? "dx"
10890 : "f", reg_names[REGNO (x)] + 2);
10892 return;
10895 case 'U':
10896 if (GET_CODE (x) != REG
10897 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10898 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10899 /* Bad value for wCG register number. */
10900 abort ();
10901 else
10902 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10903 return;
10905 /* Print an iWMMXt control register name. */
10906 case 'w':
10907 if (GET_CODE (x) != CONST_INT
10908 || INTVAL (x) < 0
10909 || INTVAL (x) >= 16)
10910 /* Bad value for wC register number. */
10911 abort ();
10912 else
10914 static const char * wc_reg_names [16] =
10916 "wCID", "wCon", "wCSSF", "wCASF",
10917 "wC4", "wC5", "wC6", "wC7",
10918 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10919 "wC12", "wC13", "wC14", "wC15"
10922 fprintf (stream, "%s", wc_reg_names [INTVAL (x)]);
10924 return;
10926 /* Print a VFP double precision register name. */
10927 case 'P':
10929 int mode = GET_MODE (x);
10930 int num;
10932 if (mode != DImode && mode != DFmode)
10933 abort ();
10935 if (GET_CODE (x) != REG
10936 || !IS_VFP_REGNUM (REGNO (x)))
10937 abort ();
10939 num = REGNO(x) - FIRST_VFP_REGNUM;
10940 if (num & 1)
10941 abort ();
10943 fprintf (stream, "d%d", num >> 1);
10945 return;
10947 default:
10948 if (x == 0)
10949 abort ();
10951 if (GET_CODE (x) == REG)
10952 asm_fprintf (stream, "%r", REGNO (x));
10953 else if (GET_CODE (x) == MEM)
10955 output_memory_reference_mode = GET_MODE (x);
10956 output_address (XEXP (x, 0));
10958 else if (GET_CODE (x) == CONST_DOUBLE)
10959 fprintf (stream, "#%s", fp_immediate_constant (x));
10960 else if (GET_CODE (x) == NEG)
10961 abort (); /* This should never happen now. */
10962 else
10964 fputc ('#', stream);
10965 output_addr_const (stream, x);
10970 #ifndef AOF_ASSEMBLER
10971 /* Target hook for assembling integer objects. The ARM version needs to
10972 handle word-sized values specially. */
10973 static bool
10974 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10976 if (size == UNITS_PER_WORD && aligned_p)
10978 fputs ("\t.word\t", asm_out_file);
10979 output_addr_const (asm_out_file, x);
10981 /* Mark symbols as position independent. We only do this in the
10982 .text segment, not in the .data segment. */
10983 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10984 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10986 if (GET_CODE (x) == SYMBOL_REF
10987 && (CONSTANT_POOL_ADDRESS_P (x)
10988 || SYMBOL_REF_LOCAL_P (x)))
10989 fputs ("(GOTOFF)", asm_out_file);
10990 else if (GET_CODE (x) == LABEL_REF)
10991 fputs ("(GOTOFF)", asm_out_file);
10992 else
10993 fputs ("(GOT)", asm_out_file);
10995 fputc ('\n', asm_out_file);
10996 return true;
10999 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
11001 int i, units;
11003 if (GET_CODE (x) != CONST_VECTOR)
11004 abort ();
11006 units = CONST_VECTOR_NUNITS (x);
11008 switch (GET_MODE (x))
11010 case V2SImode: size = 4; break;
11011 case V4HImode: size = 2; break;
11012 case V8QImode: size = 1; break;
11013 default:
11014 abort ();
11017 for (i = 0; i < units; i++)
11019 rtx elt;
11021 elt = CONST_VECTOR_ELT (x, i);
11022 assemble_integer
11023 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11026 return true;
11029 return default_assemble_integer (x, size, aligned_p);
11031 #endif
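/* Output sketch (hypothetical symbols, assuming flag_pic and a
   constant-table entry): a constant-pool or local symbol is emitted as

       .word   .LC0(GOTOFF)

   while a global symbol is emitted as

       .word   foo(GOT)

   matching the relocation suffixes chosen above.  */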
11033 /* A finite state machine takes care of noticing whether or not instructions
11034 can be conditionally executed, and thus decrease execution time and code
11035 size by deleting branch instructions. The fsm is controlled by
11036 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11038 /* The states of the fsm controlling condition codes are:
11039 0: normal, do nothing special
11040 1: make ASM_OUTPUT_OPCODE not output this instruction
11041 2: make ASM_OUTPUT_OPCODE not output this instruction
11042 3: make instructions conditional
11043 4: make instructions conditional
11045 State transitions (state->state by whom under condition):
11046 0 -> 1 final_prescan_insn if the `target' is a label
11047 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11048 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11049 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11050 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11051 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11052 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11053 (the target insn is arm_target_insn).
11055 If the jump clobbers the conditions then we use states 2 and 4.
11057 A similar thing can be done with conditional return insns.
11059 XXX In case the `target' is an unconditional branch, this conditionalising
11060 of the instructions always reduces code size, but not always execution
11061 time. But then, I want to reduce the code size to somewhere near what
11062 /bin/cc produces. */
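/* An illustration of the transformation (hypothetical code):

       cmp   r0, #0
       beq   .L2
       mov   r1, #1
     .L2:

   becomes, once states 1 and 3 suppress the branch and conditionalise
   the skipped insn,

       cmp   r0, #0
       movne r1, #1

   saving the branch at the cost of one conditionalised instruction.  */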
11064 /* Returns the index of the ARM condition code string in
11065 `arm_condition_codes'. COMPARISON should be an rtx like
11066 `(eq (...) (...))'. */
11067 static enum arm_cond_code
11068 get_arm_condition_code (rtx comparison)
11070 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11071 int code;
11072 enum rtx_code comp_code = GET_CODE (comparison);
11074 if (GET_MODE_CLASS (mode) != MODE_CC)
11075 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11076 XEXP (comparison, 1));
11078 switch (mode)
11080 case CC_DNEmode: code = ARM_NE; goto dominance;
11081 case CC_DEQmode: code = ARM_EQ; goto dominance;
11082 case CC_DGEmode: code = ARM_GE; goto dominance;
11083 case CC_DGTmode: code = ARM_GT; goto dominance;
11084 case CC_DLEmode: code = ARM_LE; goto dominance;
11085 case CC_DLTmode: code = ARM_LT; goto dominance;
11086 case CC_DGEUmode: code = ARM_CS; goto dominance;
11087 case CC_DGTUmode: code = ARM_HI; goto dominance;
11088 case CC_DLEUmode: code = ARM_LS; goto dominance;
11089 case CC_DLTUmode: code = ARM_CC;
11091 dominance:
11092 if (comp_code != EQ && comp_code != NE)
11093 abort ();
11095 if (comp_code == EQ)
11096 return ARM_INVERSE_CONDITION_CODE (code);
11097 return code;
11099 case CC_NOOVmode:
11100 switch (comp_code)
11102 case NE: return ARM_NE;
11103 case EQ: return ARM_EQ;
11104 case GE: return ARM_PL;
11105 case LT: return ARM_MI;
11106 default: abort ();
11109 case CC_Zmode:
11110 switch (comp_code)
11112 case NE: return ARM_NE;
11113 case EQ: return ARM_EQ;
11114 default: abort ();
11117 case CC_Nmode:
11118 switch (comp_code)
11120 case NE: return ARM_MI;
11121 case EQ: return ARM_PL;
11122 default: abort ();
11125 case CCFPEmode:
11126 case CCFPmode:
11127 /* These encodings assume that AC=1 in the FPA system control
11128 byte. This allows us to handle all cases except UNEQ and
11129 LTGT. */
11130 switch (comp_code)
11132 case GE: return ARM_GE;
11133 case GT: return ARM_GT;
11134 case LE: return ARM_LS;
11135 case LT: return ARM_MI;
11136 case NE: return ARM_NE;
11137 case EQ: return ARM_EQ;
11138 case ORDERED: return ARM_VC;
11139 case UNORDERED: return ARM_VS;
11140 case UNLT: return ARM_LT;
11141 case UNLE: return ARM_LE;
11142 case UNGT: return ARM_HI;
11143 case UNGE: return ARM_PL;
11144 /* UNEQ and LTGT do not have a representation. */
11145 case UNEQ: /* Fall through. */
11146 case LTGT: /* Fall through. */
11147 default: abort ();
11150 case CC_SWPmode:
11151 switch (comp_code)
11153 case NE: return ARM_NE;
11154 case EQ: return ARM_EQ;
11155 case GE: return ARM_LE;
11156 case GT: return ARM_LT;
11157 case LE: return ARM_GE;
11158 case LT: return ARM_GT;
11159 case GEU: return ARM_LS;
11160 case GTU: return ARM_CC;
11161 case LEU: return ARM_CS;
11162 case LTU: return ARM_HI;
11163 default: abort ();
11166 case CC_Cmode:
11167 switch (comp_code)
11169 case LTU: return ARM_CS;
11170 case GEU: return ARM_CC;
11171 default: abort ();
11174 case CCmode:
11175 switch (comp_code)
11177 case NE: return ARM_NE;
11178 case EQ: return ARM_EQ;
11179 case GE: return ARM_GE;
11180 case GT: return ARM_GT;
11181 case LE: return ARM_LE;
11182 case LT: return ARM_LT;
11183 case GEU: return ARM_CS;
11184 case GTU: return ARM_HI;
11185 case LEU: return ARM_LS;
11186 case LTU: return ARM_CC;
11187 default: abort ();
11190 default: abort ();
11193 abort ();
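/* Example reading of the tables above: in CC_SWPmode the comparison
   operands were swapped when the flags were set, so
   (gt (reg:CC_SWP) (const_int 0)) maps to ARM_LT -- the ARM condition
   that is true exactly when the original, unswapped GT held.  */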
11196 void
11197 arm_final_prescan_insn (rtx insn)
11199 /* BODY will hold the body of INSN. */
11200 rtx body = PATTERN (insn);
11202 /* This will be 1 if trying to repeat the trick, and things need to be
11203 reversed if it appears to fail. */
11204 int reverse = 0;
11206 /* If JUMP_CLOBBERS is one, then the condition codes will be clobbered
11207 if the branch is taken, even if the rtl suggests otherwise. It also
11208 means that we have to grub around within the jump expression to find
11209 out what the conditions are when the jump isn't taken. */
11210 int jump_clobbers = 0;
11212 /* If we start with a return insn, we only succeed if we find another one. */
11213 int seeking_return = 0;
11215 /* START_INSN will hold the insn from where we start looking. This is the
11216 first insn after the following code_label if REVERSE is true. */
11217 rtx start_insn = insn;
11219 /* If in state 4, check if the target branch is reached, in order to
11220 change back to state 0. */
11221 if (arm_ccfsm_state == 4)
11223 if (insn == arm_target_insn)
11225 arm_target_insn = NULL;
11226 arm_ccfsm_state = 0;
11228 return;
11231 /* If in state 3, it is possible to repeat the trick, if this insn is an
11232 unconditional branch to a label, and immediately following this branch
11233 is the previous target label which is only used once, and the label this
11234 branch jumps to is not too far off. */
11235 if (arm_ccfsm_state == 3)
11237 if (simplejump_p (insn))
11239 start_insn = next_nonnote_insn (start_insn);
11240 if (GET_CODE (start_insn) == BARRIER)
11242 /* XXX Isn't this always a barrier? */
11243 start_insn = next_nonnote_insn (start_insn);
11245 if (GET_CODE (start_insn) == CODE_LABEL
11246 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11247 && LABEL_NUSES (start_insn) == 1)
11248 reverse = TRUE;
11249 else
11250 return;
11252 else if (GET_CODE (body) == RETURN)
11254 start_insn = next_nonnote_insn (start_insn);
11255 if (GET_CODE (start_insn) == BARRIER)
11256 start_insn = next_nonnote_insn (start_insn);
11257 if (GET_CODE (start_insn) == CODE_LABEL
11258 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11259 && LABEL_NUSES (start_insn) == 1)
11261 reverse = TRUE;
11262 seeking_return = 1;
11264 else
11265 return;
11267 else
11268 return;
11271 if (arm_ccfsm_state != 0 && !reverse)
11272 abort ();
11273 if (GET_CODE (insn) != JUMP_INSN)
11274 return;
11276 /* This jump might be paralleled with a clobber of the condition codes;
11277 the jump should always come first. */
11278 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11279 body = XVECEXP (body, 0, 0);
11281 if (reverse
11282 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11283 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11285 int insns_skipped;
11286 int fail = FALSE, succeed = FALSE;
11287 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11288 int then_not_else = TRUE;
11289 rtx this_insn = start_insn, label = 0;
11291 /* If the jump cannot be done with one instruction, we cannot
11292 conditionally execute the instruction in the inverse case. */
11293 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11295 jump_clobbers = 1;
11296 return;
11299 /* Register the insn jumped to. */
11300 if (reverse)
11302 if (!seeking_return)
11303 label = XEXP (SET_SRC (body), 0);
11305 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11306 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11307 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11309 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11310 then_not_else = FALSE;
11312 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11313 seeking_return = 1;
11314 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11316 seeking_return = 1;
11317 then_not_else = FALSE;
11319 else
11320 abort ();
11322 /* See how many insns this branch skips, and what kind of insns. If all
11323 insns are okay, and the label or unconditional branch to the same
11324 label is not too far away, succeed. */
11325 for (insns_skipped = 0;
11326 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11328 rtx scanbody;
11330 this_insn = next_nonnote_insn (this_insn);
11331 if (!this_insn)
11332 break;
11334 switch (GET_CODE (this_insn))
11336 case CODE_LABEL:
11337 /* Succeed if it is the target label, otherwise fail since
11338 control falls in from somewhere else. */
11339 if (this_insn == label)
11341 if (jump_clobbers)
11343 arm_ccfsm_state = 2;
11344 this_insn = next_nonnote_insn (this_insn);
11346 else
11347 arm_ccfsm_state = 1;
11348 succeed = TRUE;
11350 else
11351 fail = TRUE;
11352 break;
11354 case BARRIER:
11355 /* Succeed if the following insn is the target label.
11356 Otherwise fail.
11357 If return insns are used then the last insn in a function
11358 will be a barrier. */
11359 this_insn = next_nonnote_insn (this_insn);
11360 if (this_insn && this_insn == label)
11362 if (jump_clobbers)
11364 arm_ccfsm_state = 2;
11365 this_insn = next_nonnote_insn (this_insn);
11367 else
11368 arm_ccfsm_state = 1;
11369 succeed = TRUE;
11371 else
11372 fail = TRUE;
11373 break;
11375 case CALL_INSN:
11376 /* The AAPCS says that conditional calls should not be
11377 used since they make interworking inefficient (the
11378 linker can't transform BL<cond> into BLX). That's
11379 only a problem if the machine has BLX. */
11380 if (arm_arch5)
11382 fail = TRUE;
11383 break;
11386 /* Succeed if the following insn is the target label, or
11387 if the following two insns are a barrier and the
11388 target label. */
11389 this_insn = next_nonnote_insn (this_insn);
11390 if (this_insn && GET_CODE (this_insn) == BARRIER)
11391 this_insn = next_nonnote_insn (this_insn);
11393 if (this_insn && this_insn == label
11394 && insns_skipped < max_insns_skipped)
11396 if (jump_clobbers)
11398 arm_ccfsm_state = 2;
11399 this_insn = next_nonnote_insn (this_insn);
11401 else
11402 arm_ccfsm_state = 1;
11403 succeed = TRUE;
11405 else
11406 fail = TRUE;
11407 break;
11409 case JUMP_INSN:
11410 /* If this is an unconditional branch to the same label, succeed.
11411 If it is to another label, do nothing. If it is conditional,
11412 fail. */
11413 /* XXX Probably, the tests for SET and the PC are
11414 unnecessary. */
11416 scanbody = PATTERN (this_insn);
11417 if (GET_CODE (scanbody) == SET
11418 && GET_CODE (SET_DEST (scanbody)) == PC)
11420 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11421 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11423 arm_ccfsm_state = 2;
11424 succeed = TRUE;
11426 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11427 fail = TRUE;
11429 /* Fail if a conditional return is undesirable (eg on a
11430 StrongARM), but still allow this if optimizing for size. */
11431 else if (GET_CODE (scanbody) == RETURN
11432 && !use_return_insn (TRUE, NULL)
11433 && !optimize_size)
11434 fail = TRUE;
11435 else if (GET_CODE (scanbody) == RETURN
11436 && seeking_return)
11438 arm_ccfsm_state = 2;
11439 succeed = TRUE;
11441 else if (GET_CODE (scanbody) == PARALLEL)
11443 switch (get_attr_conds (this_insn))
11445 case CONDS_NOCOND:
11446 break;
11447 default:
11448 fail = TRUE;
11449 break;
11452 else
11453 fail = TRUE; /* Unrecognized jump (eg epilogue). */
11455 break;
11457 case INSN:
11458 /* Instructions using or affecting the condition codes make it
11459 fail. */
11460 scanbody = PATTERN (this_insn);
11461 if (!(GET_CODE (scanbody) == SET
11462 || GET_CODE (scanbody) == PARALLEL)
11463 || get_attr_conds (this_insn) != CONDS_NOCOND)
11464 fail = TRUE;
11466 /* A conditional Cirrus instruction must be followed by a
11467 non-Cirrus instruction. Since we conditionalize
11468 instructions in this function, and since by the time we
11469 get here we can no longer add instructions (nops;
11470 shorten_branches () has already been called), we simply
11471 disable the conditionalizing of Cirrus
11472 instructions, to be safe. */
11473 if (GET_CODE (scanbody) != USE
11474 && GET_CODE (scanbody) != CLOBBER
11475 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11476 fail = TRUE;
11477 break;
11479 default:
11480 break;
11483 if (succeed)
11485 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11486 arm_target_label = CODE_LABEL_NUMBER (label);
11487 else if (seeking_return || arm_ccfsm_state == 2)
11489 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11491 this_insn = next_nonnote_insn (this_insn);
11492 if (this_insn && (GET_CODE (this_insn) == BARRIER
11493 || GET_CODE (this_insn) == CODE_LABEL))
11494 abort ();
11496 if (!this_insn)
11498 /* Oh, dear! We ran off the end; give up. */
11499 recog (PATTERN (insn), insn, NULL);
11500 arm_ccfsm_state = 0;
11501 arm_target_insn = NULL;
11502 return;
11504 arm_target_insn = this_insn;
11506 else
11507 abort ();
11508 if (jump_clobbers)
11510 if (reverse)
11511 abort ();
11512 arm_current_cc =
11513 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11514 0), 0), 1));
11515 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11516 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11517 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11518 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11520 else
11522 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11523 what it was. */
11524 if (!reverse)
11525 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11526 0));
11529 if (reverse || then_not_else)
11530 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11533 /* Restore recog_data (getting the attributes of other insns can
11534 destroy this array, but final.c assumes that it remains intact
11535 across this call; since the insn has been recognized already we
11536 call recog direct). */
11537 recog (PATTERN (insn), insn, NULL);
11541 /* Returns true if REGNO is a valid register
11542 for holding a quantity of type MODE. */
11543 int
11544 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11546 if (GET_MODE_CLASS (mode) == MODE_CC)
11547 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11549 if (TARGET_THUMB)
11550 /* For the Thumb we only allow values bigger than SImode in
11551 registers 0 - 6, so that there is always a second low
11552 register available to hold the upper part of the value.
11553 We probably ought to ensure that the register is the
11554 start of an even numbered register pair. */
11555 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11557 if (IS_CIRRUS_REGNUM (regno))
11558 /* We have outlawed SI values in Cirrus registers because they
11559 reside in the lower 32 bits, but SF values reside in the
11560 upper 32 bits. This causes gcc all sorts of grief. We can't
11561 even split the registers into pairs because Cirrus SI values
11562 get sign extended to 64 bits. -- aldyh.
11563 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11565 if (IS_VFP_REGNUM (regno))
11567 if (mode == SFmode || mode == SImode)
11568 return TRUE;
11570 /* DFmode values are only valid in even register pairs. */
11571 if (mode == DFmode)
11572 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11573 return FALSE;
11576 if (IS_IWMMXT_GR_REGNUM (regno))
11577 return mode == SImode;
11579 if (IS_IWMMXT_REGNUM (regno))
11580 return VALID_IWMMXT_REG_MODE (mode);
11582 /* We allow any value to be stored in the general registers.
11583 Restrict doubleword quantities to even register pairs so that we can
11584 use ldrd. */
11585 if (regno <= LAST_ARM_REGNUM)
11586 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11588 if ( regno == FRAME_POINTER_REGNUM
11589 || regno == ARG_POINTER_REGNUM)
11590 /* We only allow integers in the fake hard registers. */
11591 return GET_MODE_CLASS (mode) == MODE_INT;
11593 /* The only registers left are the FPA registers
11594 which we only allow to hold FP values. */
11595 return GET_MODE_CLASS (mode) == MODE_FLOAT
11596 && regno >= FIRST_FPA_REGNUM
11597 && regno <= LAST_FPA_REGNUM;
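/* Some illustrative consequences of the rules above: with TARGET_LDRD
   a DImode value may live in r0 (even) but not in r1 (odd); DFmode is
   allowed in the VFP pair starting at s0 but not at s1; and SImode is
   rejected for the Cirrus registers, per the comment above.  */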
11600 enum reg_class
11601 arm_regno_class (int regno)
11603 if (TARGET_THUMB)
11605 if (regno == STACK_POINTER_REGNUM)
11606 return STACK_REG;
11607 if (regno == CC_REGNUM)
11608 return CC_REG;
11609 if (regno < 8)
11610 return LO_REGS;
11611 return HI_REGS;
11614 if ( regno <= LAST_ARM_REGNUM
11615 || regno == FRAME_POINTER_REGNUM
11616 || regno == ARG_POINTER_REGNUM)
11617 return GENERAL_REGS;
11619 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11620 return NO_REGS;
11622 if (IS_CIRRUS_REGNUM (regno))
11623 return CIRRUS_REGS;
11625 if (IS_VFP_REGNUM (regno))
11626 return VFP_REGS;
11628 if (IS_IWMMXT_REGNUM (regno))
11629 return IWMMXT_REGS;
11631 if (IS_IWMMXT_GR_REGNUM (regno))
11632 return IWMMXT_GR_REGS;
11634 return FPA_REGS;
11637 /* Handle a special case when computing the offset
11638 of an argument from the frame pointer. */
11639 int
11640 arm_debugger_arg_offset (int value, rtx addr)
11642 rtx insn;
11644 /* We are only interested if dbxout_parms() failed to compute the offset. */
11645 if (value != 0)
11646 return 0;
11648 /* We can only cope with the case where the address is held in a register. */
11649 if (GET_CODE (addr) != REG)
11650 return 0;
11652 /* If we are using the frame pointer to point at the argument, then
11653 an offset of 0 is correct. */
11654 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11655 return 0;
11657 /* If we are using the stack pointer to point at the
11658 argument, then an offset of 0 is correct. */
11659 if ((TARGET_THUMB || !frame_pointer_needed)
11660 && REGNO (addr) == SP_REGNUM)
11661 return 0;
11663 /* Oh dear. The argument is pointed to by a register rather
11664 than being held in a register, or being stored at a known
11665 offset from the frame pointer. Since GDB only understands
11666 those two kinds of argument we must translate the address
11667 held in the register into an offset from the frame pointer.
11668 We do this by searching through the insns for the function
11669 looking to see where this register gets its value. If the
11670 register is initialized from the frame pointer plus an offset
11671 then we are in luck and we can continue, otherwise we give up.
11673 This code is exercised by producing debugging information
11674 for a function with arguments like this:
11676 double func (double a, double b, int c, double d) {return d;}
11678 Without this code the stab for parameter 'd' will be set to
11679 an offset of 0 from the frame pointer, rather than 8. */
11681 /* The if() statement says:
11683 If the insn is a normal instruction
11684 and if the insn is setting the value in a register
11685 and if the register being set is the register holding the address of the argument
11686 and if the address is computed by an addition
11687 that involves adding to a register
11688 which is the frame pointer
11689 a constant integer
11691 then... */
11693 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11695 if ( GET_CODE (insn) == INSN
11696 && GET_CODE (PATTERN (insn)) == SET
11697 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11698 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11699 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11700 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11701 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11704 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11706 break;
11710 if (value == 0)
11712 debug_rtx (addr);
11713 warning ("unable to compute real location of stacked parameter");
11714 value = 8; /* XXX magic hack */
11717 return value;
11720 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11721 do \
11723 if ((MASK) & insn_flags) \
11724 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11725 BUILT_IN_MD, NULL, NULL_TREE); \
11727 while (0)
11729 struct builtin_description
11731 const unsigned int mask;
11732 const enum insn_code icode;
11733 const char * const name;
11734 const enum arm_builtins code;
11735 const enum rtx_code comparison;
11736 const unsigned int flag;
11739 static const struct builtin_description bdesc_2arg[] =
11741 #define IWMMXT_BUILTIN(code, string, builtin) \
11742 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11743 ARM_BUILTIN_##builtin, 0, 0 },
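/* For instance (expansion shown for illustration), the first entry
   below,
     IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
   expands to
     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },
   binding __builtin_arm_waddb to the addv8qi3 insn pattern.  */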
11745 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11746 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11747 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11748 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11749 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11750 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11751 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11752 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11753 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11754 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11755 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11756 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11757 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11758 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11759 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11760 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11761 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11762 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11763 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11764 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11765 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11766 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11767 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11768 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11769 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11770 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11771 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11772 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11773 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11774 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11775 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11776 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11777 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11778 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11779 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11780 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11781 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11782 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11783 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11784 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11785 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11786 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11787 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11788 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11789 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11790 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11791 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11792 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11793 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11794 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11795 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11796 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11797 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11798 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11799 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11800 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11801 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11802 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11804 #define IWMMXT_BUILTIN2(code, builtin) \
11805 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11807 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11808 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11809 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11810 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11811 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11812 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11813 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11814 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11815 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11816 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11817 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11818 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11819 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11820 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11821 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11822 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11823 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11824 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11825 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11826 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11827 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11828 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11829 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11830 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11831 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11832 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11833 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11834 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11835 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11836 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11837 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11838 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11841 static const struct builtin_description bdesc_1arg[] =
11843 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11844 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11845 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11846 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11847 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11848 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11849 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11850 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11851 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11852 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11853 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11854 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11855 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11856 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11857 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11858 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11859 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11860 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11863 /* Set up all the iWMMXt builtins. This is
11864 not called if TARGET_IWMMXT is zero. */
11866 static void
11867 arm_init_iwmmxt_builtins (void)
11869 const struct builtin_description * d;
11870 size_t i;
11871 tree endlink = void_list_node;
11873 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11874 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11875 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11877 tree int_ftype_int
11878 = build_function_type (integer_type_node,
11879 tree_cons (NULL_TREE, integer_type_node, endlink));
11880 tree v8qi_ftype_v8qi_v8qi_int
11881 = build_function_type (V8QI_type_node,
11882 tree_cons (NULL_TREE, V8QI_type_node,
11883 tree_cons (NULL_TREE, V8QI_type_node,
11884 tree_cons (NULL_TREE,
11885 integer_type_node,
11886 endlink))));
11887 tree v4hi_ftype_v4hi_int
11888 = build_function_type (V4HI_type_node,
11889 tree_cons (NULL_TREE, V4HI_type_node,
11890 tree_cons (NULL_TREE, integer_type_node,
11891 endlink)));
11892 tree v2si_ftype_v2si_int
11893 = build_function_type (V2SI_type_node,
11894 tree_cons (NULL_TREE, V2SI_type_node,
11895 tree_cons (NULL_TREE, integer_type_node,
11896 endlink)));
11897 tree v2si_ftype_di_di
11898 = build_function_type (V2SI_type_node,
11899 tree_cons (NULL_TREE, long_long_integer_type_node,
11900 tree_cons (NULL_TREE, long_long_integer_type_node,
11901 endlink)));
11902 tree di_ftype_di_int
11903 = build_function_type (long_long_integer_type_node,
11904 tree_cons (NULL_TREE, long_long_integer_type_node,
11905 tree_cons (NULL_TREE, integer_type_node,
11906 endlink)));
11907 tree di_ftype_di_int_int
11908 = build_function_type (long_long_integer_type_node,
11909 tree_cons (NULL_TREE, long_long_integer_type_node,
11910 tree_cons (NULL_TREE, integer_type_node,
11911 tree_cons (NULL_TREE,
11912 integer_type_node,
11913 endlink))));
11914 tree int_ftype_v8qi
11915 = build_function_type (integer_type_node,
11916 tree_cons (NULL_TREE, V8QI_type_node,
11917 endlink));
11918 tree int_ftype_v4hi
11919 = build_function_type (integer_type_node,
11920 tree_cons (NULL_TREE, V4HI_type_node,
11921 endlink));
11922 tree int_ftype_v2si
11923 = build_function_type (integer_type_node,
11924 tree_cons (NULL_TREE, V2SI_type_node,
11925 endlink));
11926 tree int_ftype_v8qi_int
11927 = build_function_type (integer_type_node,
11928 tree_cons (NULL_TREE, V8QI_type_node,
11929 tree_cons (NULL_TREE, integer_type_node,
11930 endlink)));
11931 tree int_ftype_v4hi_int
11932 = build_function_type (integer_type_node,
11933 tree_cons (NULL_TREE, V4HI_type_node,
11934 tree_cons (NULL_TREE, integer_type_node,
11935 endlink)));
11936 tree int_ftype_v2si_int
11937 = build_function_type (integer_type_node,
11938 tree_cons (NULL_TREE, V2SI_type_node,
11939 tree_cons (NULL_TREE, integer_type_node,
11940 endlink)));
11941 tree v8qi_ftype_v8qi_int_int
11942 = build_function_type (V8QI_type_node,
11943 tree_cons (NULL_TREE, V8QI_type_node,
11944 tree_cons (NULL_TREE, integer_type_node,
11945 tree_cons (NULL_TREE,
11946 integer_type_node,
11947 endlink))));
11948 tree v4hi_ftype_v4hi_int_int
11949 = build_function_type (V4HI_type_node,
11950 tree_cons (NULL_TREE, V4HI_type_node,
11951 tree_cons (NULL_TREE, integer_type_node,
11952 tree_cons (NULL_TREE,
11953 integer_type_node,
11954 endlink))));
11955 tree v2si_ftype_v2si_int_int
11956 = build_function_type (V2SI_type_node,
11957 tree_cons (NULL_TREE, V2SI_type_node,
11958 tree_cons (NULL_TREE, integer_type_node,
11959 tree_cons (NULL_TREE,
11960 integer_type_node,
11961 endlink))));
11962 /* Miscellaneous. */
11963 tree v8qi_ftype_v4hi_v4hi
11964 = build_function_type (V8QI_type_node,
11965 tree_cons (NULL_TREE, V4HI_type_node,
11966 tree_cons (NULL_TREE, V4HI_type_node,
11967 endlink)));
11968 tree v4hi_ftype_v2si_v2si
11969 = build_function_type (V4HI_type_node,
11970 tree_cons (NULL_TREE, V2SI_type_node,
11971 tree_cons (NULL_TREE, V2SI_type_node,
11972 endlink)));
11973 tree v2si_ftype_v4hi_v4hi
11974 = build_function_type (V2SI_type_node,
11975 tree_cons (NULL_TREE, V4HI_type_node,
11976 tree_cons (NULL_TREE, V4HI_type_node,
11977 endlink)));
11978 tree v2si_ftype_v8qi_v8qi
11979 = build_function_type (V2SI_type_node,
11980 tree_cons (NULL_TREE, V8QI_type_node,
11981 tree_cons (NULL_TREE, V8QI_type_node,
11982 endlink)));
11983 tree v4hi_ftype_v4hi_di
11984 = build_function_type (V4HI_type_node,
11985 tree_cons (NULL_TREE, V4HI_type_node,
11986 tree_cons (NULL_TREE,
11987 long_long_integer_type_node,
11988 endlink)));
11989 tree v2si_ftype_v2si_di
11990 = build_function_type (V2SI_type_node,
11991 tree_cons (NULL_TREE, V2SI_type_node,
11992 tree_cons (NULL_TREE,
11993 long_long_integer_type_node,
11994 endlink)));
11995 tree void_ftype_int_int
11996 = build_function_type (void_type_node,
11997 tree_cons (NULL_TREE, integer_type_node,
11998 tree_cons (NULL_TREE, integer_type_node,
11999 endlink)));
12000 tree di_ftype_void
12001 = build_function_type (long_long_unsigned_type_node, endlink);
12002 tree di_ftype_v8qi
12003 = build_function_type (long_long_integer_type_node,
12004 tree_cons (NULL_TREE, V8QI_type_node,
12005 endlink));
12006 tree di_ftype_v4hi
12007 = build_function_type (long_long_integer_type_node,
12008 tree_cons (NULL_TREE, V4HI_type_node,
12009 endlink));
12010 tree di_ftype_v2si
12011 = build_function_type (long_long_integer_type_node,
12012 tree_cons (NULL_TREE, V2SI_type_node,
12013 endlink));
12014 tree v2si_ftype_v4hi
12015 = build_function_type (V2SI_type_node,
12016 tree_cons (NULL_TREE, V4HI_type_node,
12017 endlink));
12018 tree v4hi_ftype_v8qi
12019 = build_function_type (V4HI_type_node,
12020 tree_cons (NULL_TREE, V8QI_type_node,
12021 endlink));
12023 tree di_ftype_di_v4hi_v4hi
12024 = build_function_type (long_long_unsigned_type_node,
12025 tree_cons (NULL_TREE,
12026 long_long_unsigned_type_node,
12027 tree_cons (NULL_TREE, V4HI_type_node,
12028 tree_cons (NULL_TREE,
12029 V4HI_type_node,
12030 endlink))));
12032 tree di_ftype_v4hi_v4hi
12033 = build_function_type (long_long_unsigned_type_node,
12034 tree_cons (NULL_TREE, V4HI_type_node,
12035 tree_cons (NULL_TREE, V4HI_type_node,
12036 endlink)));
12038 /* Normal vector binops. */
12039 tree v8qi_ftype_v8qi_v8qi
12040 = build_function_type (V8QI_type_node,
12041 tree_cons (NULL_TREE, V8QI_type_node,
12042 tree_cons (NULL_TREE, V8QI_type_node,
12043 endlink)));
12044 tree v4hi_ftype_v4hi_v4hi
12045 = build_function_type (V4HI_type_node,
12046 tree_cons (NULL_TREE, V4HI_type_node,
12047 tree_cons (NULL_TREE, V4HI_type_node,
12048 endlink)));
12049 tree v2si_ftype_v2si_v2si
12050 = build_function_type (V2SI_type_node,
12051 tree_cons (NULL_TREE, V2SI_type_node,
12052 tree_cons (NULL_TREE, V2SI_type_node,
12053 endlink)));
12054 tree di_ftype_di_di
12055 = build_function_type (long_long_unsigned_type_node,
12056 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12057 tree_cons (NULL_TREE,
12058 long_long_unsigned_type_node,
12059 endlink)));
12061 /* Add all builtins that are more or less simple operations on two
12062 operands. */
12063 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12065 /* Use one of the operands; the target can have a different mode for
12066 mask-generating compares. */
12067 enum machine_mode mode;
12068 tree type;
12070 if (d->name == 0)
12071 continue;
12073 mode = insn_data[d->icode].operand[1].mode;
12075 switch (mode)
12077 case V8QImode:
12078 type = v8qi_ftype_v8qi_v8qi;
12079 break;
12080 case V4HImode:
12081 type = v4hi_ftype_v4hi_v4hi;
12082 break;
12083 case V2SImode:
12084 type = v2si_ftype_v2si_v2si;
12085 break;
12086 case DImode:
12087 type = di_ftype_di_di;
12088 break;
12090 default:
12091 abort ();
12094 def_mbuiltin (d->mask, d->name, type, d->code);
12097 /* Add the remaining MMX insns with somewhat more complicated types. */
12098 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12099 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12100 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12102 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12103 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12104 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12105 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12106 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12107 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12109 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12110 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12111 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12112 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12113 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12114 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12116 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12117 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12118 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12119 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12120 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12121 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12123 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12124 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12125 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12126 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12127 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12128 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12130 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12132 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12133 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12134 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12135 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12137 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12138 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12139 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12140 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12141 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12142 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12143 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12144 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12145 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12147 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12148 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12149 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12151 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12152 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12153 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12155 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12156 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12157 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12158 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12159 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12160 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12162 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12163 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12164 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12165 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12166 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12167 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12168 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12169 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12170 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12171 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12172 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12173 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12175 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12176 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12177 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12178 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12180 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12181 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12182 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12183 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12184 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12185 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12186 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
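/* Editor's note: a hedged usage sketch, not part of GCC itself, showing
   how the builtins registered above surface in user code when iWMMXt is
   enabled.  The vector typedef is an illustrative assumption; the builtin
   names and signatures follow the def_mbuiltin calls above (wzero is
   di_ftype_void, wsllwi is v2si_ftype_v2si_int):

       typedef int v2si __attribute__ ((vector_size (8)));

       unsigned long long zero = __builtin_arm_wzero ();  -- ARM_BUILTIN_WZERO

       v2si shift_lanes (v2si v)
       {
         return __builtin_arm_wsllwi (v, 3);  -- shift each SI lane left by 3
       }
*/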
12189 static void
12190 arm_init_builtins (void)
12192 if (TARGET_REALLY_IWMMXT)
12193 arm_init_iwmmxt_builtins ();
12196 /* Errors in the source file can cause expand_expr to return const0_rtx
12197 where we expect a vector. To avoid crashing, use one of the vector
12198 clear instructions. */
12200 static rtx
12201 safe_vector_operand (rtx x, enum machine_mode mode)
12203 if (x != const0_rtx)
12204 return x;
12205 x = gen_reg_rtx (mode);
12207 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12208 : gen_rtx_SUBREG (DImode, x, 0)));
12209 return x;
12212 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12214 static rtx
12215 arm_expand_binop_builtin (enum insn_code icode,
12216 tree arglist, rtx target)
12218 rtx pat;
12219 tree arg0 = TREE_VALUE (arglist);
12220 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12221 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12222 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12223 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12224 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12225 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12227 if (VECTOR_MODE_P (mode0))
12228 op0 = safe_vector_operand (op0, mode0);
12229 if (VECTOR_MODE_P (mode1))
12230 op1 = safe_vector_operand (op1, mode1);
12232 if (! target
12233 || GET_MODE (target) != tmode
12234 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12235 target = gen_reg_rtx (tmode);
12237 /* In case the insn wants input operands in modes different from
12238 the result, abort. */
12239 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12240 abort ();
12242 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12243 op0 = copy_to_mode_reg (mode0, op0);
12244 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12245 op1 = copy_to_mode_reg (mode1, op1);
12247 pat = GEN_FCN (icode) (target, op0, op1);
12248 if (! pat)
12249 return 0;
12250 emit_insn (pat);
12251 return target;
12254 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12256 static rtx
12257 arm_expand_unop_builtin (enum insn_code icode,
12258 tree arglist, rtx target, int do_load)
12260 rtx pat;
12261 tree arg0 = TREE_VALUE (arglist);
12262 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12263 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12264 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12266 if (! target
12267 || GET_MODE (target) != tmode
12268 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12269 target = gen_reg_rtx (tmode);
12270 if (do_load)
12271 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12272 else
12274 if (VECTOR_MODE_P (mode0))
12275 op0 = safe_vector_operand (op0, mode0);
12277 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12278 op0 = copy_to_mode_reg (mode0, op0);
12281 pat = GEN_FCN (icode) (target, op0);
12282 if (! pat)
12283 return 0;
12284 emit_insn (pat);
12285 return target;
12288 /* Expand an expression EXP that calls a built-in function,
12289 with result going to TARGET if that's convenient
12290 (and in mode MODE if that's convenient).
12291 SUBTARGET may be used as the target for computing one of EXP's operands.
12292 IGNORE is nonzero if the value is to be ignored. */
12294 static rtx
12295 arm_expand_builtin (tree exp,
12296 rtx target,
12297 rtx subtarget ATTRIBUTE_UNUSED,
12298 enum machine_mode mode ATTRIBUTE_UNUSED,
12299 int ignore ATTRIBUTE_UNUSED)
12301 const struct builtin_description * d;
12302 enum insn_code icode;
12303 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12304 tree arglist = TREE_OPERAND (exp, 1);
12305 tree arg0;
12306 tree arg1;
12307 tree arg2;
12308 rtx op0;
12309 rtx op1;
12310 rtx op2;
12311 rtx pat;
12312 int fcode = DECL_FUNCTION_CODE (fndecl);
12313 size_t i;
12314 enum machine_mode tmode;
12315 enum machine_mode mode0;
12316 enum machine_mode mode1;
12317 enum machine_mode mode2;
12319 switch (fcode)
12321 case ARM_BUILTIN_TEXTRMSB:
12322 case ARM_BUILTIN_TEXTRMUB:
12323 case ARM_BUILTIN_TEXTRMSH:
12324 case ARM_BUILTIN_TEXTRMUH:
12325 case ARM_BUILTIN_TEXTRMSW:
12326 case ARM_BUILTIN_TEXTRMUW:
12327 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12328 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12329 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12330 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12331 : CODE_FOR_iwmmxt_textrmw);
12333 arg0 = TREE_VALUE (arglist);
12334 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12335 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12336 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12337 tmode = insn_data[icode].operand[0].mode;
12338 mode0 = insn_data[icode].operand[1].mode;
12339 mode1 = insn_data[icode].operand[2].mode;
12341 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12342 op0 = copy_to_mode_reg (mode0, op0);
12343 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12345 /* @@@ better error message */
12346 error ("selector must be an immediate");
12347 return gen_reg_rtx (tmode);
12349 if (target == 0
12350 || GET_MODE (target) != tmode
12351 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12352 target = gen_reg_rtx (tmode);
12353 pat = GEN_FCN (icode) (target, op0, op1);
12354 if (! pat)
12355 return 0;
12356 emit_insn (pat);
12357 return target;
12359 case ARM_BUILTIN_TINSRB:
12360 case ARM_BUILTIN_TINSRH:
12361 case ARM_BUILTIN_TINSRW:
12362 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12363 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12364 : CODE_FOR_iwmmxt_tinsrw);
12365 arg0 = TREE_VALUE (arglist);
12366 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12367 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12368 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12369 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12370 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12371 tmode = insn_data[icode].operand[0].mode;
12372 mode0 = insn_data[icode].operand[1].mode;
12373 mode1 = insn_data[icode].operand[2].mode;
12374 mode2 = insn_data[icode].operand[3].mode;
12376 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12377 op0 = copy_to_mode_reg (mode0, op0);
12378 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12379 op1 = copy_to_mode_reg (mode1, op1);
12380 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12382 /* @@@ better error message */
12383 error ("selector must be an immediate");
12384 return const0_rtx;
12386 if (target == 0
12387 || GET_MODE (target) != tmode
12388 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12389 target = gen_reg_rtx (tmode);
12390 pat = GEN_FCN (icode) (target, op0, op1, op2);
12391 if (! pat)
12392 return 0;
12393 emit_insn (pat);
12394 return target;
12396 case ARM_BUILTIN_SETWCX:
12397 arg0 = TREE_VALUE (arglist);
12398 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12399 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12400 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12401 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12402 return 0;
12404 case ARM_BUILTIN_GETWCX:
12405 arg0 = TREE_VALUE (arglist);
12406 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12407 target = gen_reg_rtx (SImode);
12408 emit_insn (gen_iwmmxt_tmrc (target, op0));
12409 return target;
12411 case ARM_BUILTIN_WSHUFH:
12412 icode = CODE_FOR_iwmmxt_wshufh;
12413 arg0 = TREE_VALUE (arglist);
12414 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12415 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12416 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12417 tmode = insn_data[icode].operand[0].mode;
12418 mode1 = insn_data[icode].operand[1].mode;
12419 mode2 = insn_data[icode].operand[2].mode;
12421 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12422 op0 = copy_to_mode_reg (mode1, op0);
12423 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12425 /* @@@ better error message */
12426 error ("mask must be an immediate");
12427 return const0_rtx;
12429 if (target == 0
12430 || GET_MODE (target) != tmode
12431 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12432 target = gen_reg_rtx (tmode);
12433 pat = GEN_FCN (icode) (target, op0, op1);
12434 if (! pat)
12435 return 0;
12436 emit_insn (pat);
12437 return target;
12439 case ARM_BUILTIN_WSADB:
12440 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12441 case ARM_BUILTIN_WSADH:
12442 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12443 case ARM_BUILTIN_WSADBZ:
12444 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12445 case ARM_BUILTIN_WSADHZ:
12446 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12448 /* Several three-argument builtins. */
12449 case ARM_BUILTIN_WMACS:
12450 case ARM_BUILTIN_WMACU:
12451 case ARM_BUILTIN_WALIGN:
12452 case ARM_BUILTIN_TMIA:
12453 case ARM_BUILTIN_TMIAPH:
12454 case ARM_BUILTIN_TMIATT:
12455 case ARM_BUILTIN_TMIATB:
12456 case ARM_BUILTIN_TMIABT:
12457 case ARM_BUILTIN_TMIABB:
12458 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12459 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12460 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12461 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12462 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12463 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12464 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12465 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12466 : CODE_FOR_iwmmxt_walign);
12467 arg0 = TREE_VALUE (arglist);
12468 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12469 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12470 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12471 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12472 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12473 tmode = insn_data[icode].operand[0].mode;
12474 mode0 = insn_data[icode].operand[1].mode;
12475 mode1 = insn_data[icode].operand[2].mode;
12476 mode2 = insn_data[icode].operand[3].mode;
12478 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12479 op0 = copy_to_mode_reg (mode0, op0);
12480 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12481 op1 = copy_to_mode_reg (mode1, op1);
12482 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12483 op2 = copy_to_mode_reg (mode2, op2);
12484 if (target == 0
12485 || GET_MODE (target) != tmode
12486 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12487 target = gen_reg_rtx (tmode);
12488 pat = GEN_FCN (icode) (target, op0, op1, op2);
12489 if (! pat)
12490 return 0;
12491 emit_insn (pat);
12492 return target;
12494 case ARM_BUILTIN_WZERO:
12495 target = gen_reg_rtx (DImode);
12496 emit_insn (gen_iwmmxt_clrdi (target));
12497 return target;
12499 default:
12500 break;
12503 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12504 if (d->code == (const enum arm_builtins) fcode)
12505 return arm_expand_binop_builtin (d->icode, arglist, target);
12507 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12508 if (d->code == (const enum arm_builtins) fcode)
12509 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12511 /* @@@ Should really do something sensible here. */
12512 return NULL_RTX;
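/* Editor's note: a sketch of the dispatch above, assuming FCODE names a
   simple two-operand iWMMXt builtin that is not one of the special cases
   in the switch:

       arm_expand_builtin
         -> switch (fcode) falls through to default
         -> scan bdesc_2arg until d->code == fcode
         -> arm_expand_binop_builtin (d->icode, arglist, target)
              expands both arguments, coerces them to the operand modes in
              insn_data[d->icode], and emits GEN_FCN (d->icode).

   One-operand builtins take the analogous bdesc_1arg path through
   arm_expand_unop_builtin.  */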
12515 /* Recursively search through all of the blocks in a function
12516 checking to see if any of the variables created in that
12517 function match the RTX called 'orig'. If they do then
12518 replace them with the RTX called 'new'. */
12519 static void
12520 replace_symbols_in_block (tree block, rtx orig, rtx new)
12522 for (; block; block = BLOCK_CHAIN (block))
12524 tree sym;
12526 if (!TREE_USED (block))
12527 continue;
12529 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12531 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12532 || DECL_IGNORED_P (sym)
12533 || TREE_CODE (sym) != VAR_DECL
12534 || DECL_EXTERNAL (sym)
12535 || !rtx_equal_p (DECL_RTL (sym), orig)
12537 continue;
12539 SET_DECL_RTL (sym, new);
12542 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12546 /* Return the number (counting from 0) of
12547 the least significant set bit in MASK. */
12549 inline static int
12550 number_of_first_bit_set (int mask)
12552 int bit;
12554 for (bit = 0;
12555 (mask & (1 << bit)) == 0;
12556 ++bit)
12557 continue;
12559 return bit;
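/* Editor's note: a worked example for the loop above.  With
   MASK == 0x28 (binary 101000), bits 0..2 test clear and bit 3 is the
   first set bit, so number_of_first_bit_set (0x28) == 3.  The loop never
   terminates for MASK == 0, so callers must pass a nonzero mask.  */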
12562 /* Generate code to return from a thumb function.
12563 If 'reg_containing_return_addr' is -1, then the return address is
12564 actually on the stack, at the stack pointer. */
12565 static void
12566 thumb_exit (FILE *f, int reg_containing_return_addr)
12568 unsigned regs_available_for_popping;
12569 unsigned regs_to_pop;
12570 int pops_needed;
12571 unsigned available;
12572 unsigned required;
12573 int mode;
12574 int size;
12575 int restore_a4 = FALSE;
12577 /* Compute the registers we need to pop. */
12578 regs_to_pop = 0;
12579 pops_needed = 0;
12581 if (reg_containing_return_addr == -1)
12583 regs_to_pop |= 1 << LR_REGNUM;
12584 ++pops_needed;
12587 if (TARGET_BACKTRACE)
12589 /* Restore the (ARM) frame pointer and stack pointer. */
12590 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12591 pops_needed += 2;
12594 /* If there is nothing to pop then just emit the BX instruction and
12595 return. */
12596 if (pops_needed == 0)
12598 if (current_function_calls_eh_return)
12599 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12601 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12602 return;
12604 /* Otherwise if we are not supporting interworking and we have not created
12605 a backtrace structure and the function was not entered in ARM mode then
12606 just pop the return address straight into the PC. */
12607 else if (!TARGET_INTERWORK
12608 && !TARGET_BACKTRACE
12609 && !is_called_in_ARM_mode (current_function_decl)
12610 && !current_function_calls_eh_return)
12612 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12613 return;
12616 /* Find out how many of the (return) argument registers we can corrupt. */
12617 regs_available_for_popping = 0;
12619 /* If returning via __builtin_eh_return, the bottom three registers
12620 all contain information needed for the return. */
12621 if (current_function_calls_eh_return)
12622 size = 12;
12623 else
12625 /* We can deduce the registers used from the function's
12626 return value. This is more reliable than examining
12627 regs_ever_live[] because that will be set if the register is
12628 ever used in the function, not just if the register is used
12629 to hold a return value. */
12631 if (current_function_return_rtx != 0)
12632 mode = GET_MODE (current_function_return_rtx);
12633 else
12634 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12636 size = GET_MODE_SIZE (mode);
12638 if (size == 0)
12640 /* In a void function we can use any argument register.
12641 In a function that returns a structure on the stack
12642 we can use the second and third argument registers. */
12643 if (mode == VOIDmode)
12644 regs_available_for_popping =
12645 (1 << ARG_REGISTER (1))
12646 | (1 << ARG_REGISTER (2))
12647 | (1 << ARG_REGISTER (3));
12648 else
12649 regs_available_for_popping =
12650 (1 << ARG_REGISTER (2))
12651 | (1 << ARG_REGISTER (3));
12653 else if (size <= 4)
12654 regs_available_for_popping =
12655 (1 << ARG_REGISTER (2))
12656 | (1 << ARG_REGISTER (3));
12657 else if (size <= 8)
12658 regs_available_for_popping =
12659 (1 << ARG_REGISTER (3));
12662 /* Match registers to be popped with registers into which we pop them. */
12663 for (available = regs_available_for_popping,
12664 required = regs_to_pop;
12665 required != 0 && available != 0;
12666 available &= ~(available & - available),
12667 required &= ~(required & - required))
12668 -- pops_needed;
12670 /* If we have any popping registers left over, remove them. */
12671 if (available > 0)
12672 regs_available_for_popping &= ~available;
12674 /* Otherwise if we need another popping register we can use
12675 the fourth argument register. */
12676 else if (pops_needed)
12678 /* If we have not found any free argument registers and
12679 reg a4 contains the return address, we must move it. */
12680 if (regs_available_for_popping == 0
12681 && reg_containing_return_addr == LAST_ARG_REGNUM)
12683 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12684 reg_containing_return_addr = LR_REGNUM;
12686 else if (size > 12)
12688 /* Register a4 is being used to hold part of the return value,
12689 but we have dire need of a free, low register. */
12690 restore_a4 = TRUE;
12692 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12695 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12697 /* The fourth argument register is available. */
12698 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12700 --pops_needed;
12704 /* Pop as many registers as we can. */
12705 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12706 regs_available_for_popping);
12708 /* Process the registers we popped. */
12709 if (reg_containing_return_addr == -1)
12711 /* The return address was popped into the lowest numbered register. */
12712 regs_to_pop &= ~(1 << LR_REGNUM);
12714 reg_containing_return_addr =
12715 number_of_first_bit_set (regs_available_for_popping);
12717 /* Remove this register from the mask of available registers, so that
12718 the return address will not be corrupted by further pops. */
12719 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12722 /* If we popped other registers then handle them here. */
12723 if (regs_available_for_popping)
12725 int frame_pointer;
12727 /* Work out which register currently contains the frame pointer. */
12728 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12730 /* Move it into the correct place. */
12731 asm_fprintf (f, "\tmov\t%r, %r\n",
12732 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12734 /* (Temporarily) remove it from the mask of popped registers. */
12735 regs_available_for_popping &= ~(1 << frame_pointer);
12736 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12738 if (regs_available_for_popping)
12740 int stack_pointer;
12742 /* We popped the stack pointer as well;
12743 find the register that contains it. */
12744 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12746 /* Move it into the stack register. */
12747 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12749 /* At this point we have popped all necessary registers, so
12750 do not worry about restoring regs_available_for_popping
12751 to its correct value:
12753 assert (pops_needed == 0)
12754 assert (regs_available_for_popping == (1 << frame_pointer))
12755 assert (regs_to_pop == (1 << STACK_POINTER)) */
12757 else
12759 /* Since we have just moved the popped value into the frame
12760 pointer, the popping register is available for reuse, and
12761 we know that we still have the stack pointer left to pop. */
12762 regs_available_for_popping |= (1 << frame_pointer);
12766 /* If we still have registers left on the stack, but we no longer have
12767 any registers into which we can pop them, then we must move the return
12768 address into the link register and make available the register that
12769 contained it. */
12770 if (regs_available_for_popping == 0 && pops_needed > 0)
12772 regs_available_for_popping |= 1 << reg_containing_return_addr;
12774 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12775 reg_containing_return_addr);
12777 reg_containing_return_addr = LR_REGNUM;
12780 /* If we have registers left on the stack then pop some more.
12781 We know that at most we will want to pop FP and SP. */
12782 if (pops_needed > 0)
12784 int popped_into;
12785 int move_to;
12787 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12788 regs_available_for_popping);
12790 /* We have popped either FP or SP.
12791 Move whichever one it is into the correct register. */
12792 popped_into = number_of_first_bit_set (regs_available_for_popping);
12793 move_to = number_of_first_bit_set (regs_to_pop);
12795 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12797 regs_to_pop &= ~(1 << move_to);
12799 --pops_needed;
12802 /* If we still have not popped everything then we must have only
12803 had one register available to us and we are now popping the SP. */
12804 if (pops_needed > 0)
12806 int popped_into;
12808 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12809 regs_available_for_popping);
12811 popped_into = number_of_first_bit_set (regs_available_for_popping);
12813 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12815 assert (regs_to_pop == (1 << STACK_POINTER))
12816 assert (pops_needed == 1)
12820 /* If necessary restore the a4 register. */
12821 if (restore_a4)
12823 if (reg_containing_return_addr != LR_REGNUM)
12825 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12826 reg_containing_return_addr = LR_REGNUM;
12829 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12832 if (current_function_calls_eh_return)
12833 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12835 /* Return to caller. */
12836 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
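/* Editor's note: an illustrative output sketch for the code above, with
   the concrete assembly an assumption rather than verified compiler
   output.  In the simplest case, with interworking and backtraces off,
   the early exit emits a single "pop {pc}".  Otherwise, once the return
   address has been popped into a work register such as r3
   (reg_containing_return_addr == 3) and everything else is restored,
   the function ends with just

       bx      r3                                                        */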
12839 /* Emit code to push or pop registers to or from the stack. F is the
12840 assembly file. MASK is the registers to push or pop. PUSH is
12841 nonzero if we should push, and zero if we should pop. For debugging
12842 output, if pushing, adjust CFA_OFFSET by the amount of space added
12843 to the stack. REAL_REGS should have the same number of bits set as
12844 MASK, and will be used instead (in the same order) to describe which
12845 registers were saved - this is used to mark the save slots when we
12846 push high registers after moving them to low registers. */
12847 static void
12848 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12850 int regno;
12851 int lo_mask = mask & 0xFF;
12852 int pushed_words = 0;
12854 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12856 /* Special case. Do not generate a POP PC statement here; do it in
12857 thumb_exit(). */
12858 thumb_exit (f, -1);
12859 return;
12862 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12864 /* Look at the low registers first. */
12865 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12867 if (lo_mask & 1)
12869 asm_fprintf (f, "%r", regno);
12871 if ((lo_mask & ~1) != 0)
12872 fprintf (f, ", ");
12874 pushed_words++;
12878 if (push && (mask & (1 << LR_REGNUM)))
12880 /* Catch pushing the LR. */
12881 if (mask & 0xFF)
12882 fprintf (f, ", ");
12884 asm_fprintf (f, "%r", LR_REGNUM);
12886 pushed_words++;
12888 else if (!push && (mask & (1 << PC_REGNUM)))
12890 /* Catch popping the PC. */
12891 if (TARGET_INTERWORK || TARGET_BACKTRACE
12892 || current_function_calls_eh_return)
12894 /* The PC is never popped directly; instead
12895 it is popped into r3 and then BX is used. */
12896 fprintf (f, "}\n");
12898 thumb_exit (f, -1);
12900 return;
12902 else
12904 if (mask & 0xFF)
12905 fprintf (f, ", ");
12907 asm_fprintf (f, "%r", PC_REGNUM);
12911 fprintf (f, "}\n");
12913 if (push && pushed_words && dwarf2out_do_frame ())
12915 char *l = dwarf2out_cfi_label ();
12916 int pushed_mask = real_regs;
12918 *cfa_offset += pushed_words * 4;
12919 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12921 pushed_words = 0;
12922 pushed_mask = real_regs;
12923 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12925 if (pushed_mask & 1)
12926 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
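/* Editor's note: an illustrative call, with the printed output an
   assumption rather than verified compiler output.  Passing
   MASK == (1 << 4) | (1 << 5) | (1 << LR_REGNUM) with PUSH nonzero
   should print

       push    {r4, r5, lr}

   advance *CFA_OFFSET by 12 (three words), and record one
   dwarf2out_reg_save slot per bit set in REAL_REGS.  */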
12931 void
12932 thumb_final_prescan_insn (rtx insn)
12934 if (flag_print_asm_name)
12935 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12936 INSN_ADDRESSES (INSN_UID (insn)));
12939 int
12940 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12942 unsigned HOST_WIDE_INT mask = 0xff;
12943 int i;
12945 if (val == 0) /* XXX */
12946 return 0;
12948 for (i = 0; i < 25; i++)
12949 if ((val & (mask << i)) == val)
12950 return 1;
12952 return 0;
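/* Editor's note: two worked examples for the test above.  VAL == 0x1FE00
   is 0xff shifted left by 9, so (val & (mask << 9)) == val holds and the
   function returns 1.  VAL == 0x101 sets bits 0 and 8, which cannot fit
   in any eight-bit window, so every iteration fails and 0 is returned.  */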
12955 /* Returns nonzero if the current function contains,
12956 or might contain a far jump. */
12957 static int
12958 thumb_far_jump_used_p (void)
12960 rtx insn;
12962 /* This test is only important for leaf functions. */
12963 /* assert (!leaf_function_p ()); */
12965 /* If we have already decided that far jumps may be used,
12966 do not bother checking again, and always return true even if
12967 it turns out that they are not being used. Once we have made
12968 the decision that far jumps are present (and that hence the link
12969 register will be pushed onto the stack) we cannot go back on it. */
12970 if (cfun->machine->far_jump_used)
12971 return 1;
12973 /* If this function is not being called from the prologue/epilogue
12974 generation code then it must be being called from the
12975 INITIAL_ELIMINATION_OFFSET macro. */
12976 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12978 /* In this case we know that we are being asked about the elimination
12979 of the arg pointer register. If that register is not being used,
12980 then there are no arguments on the stack, and we do not have to
12981 worry that a far jump might force the prologue to push the link
12982 register, changing the stack offsets. In this case we can just
12983 return false, since the presence of far jumps in the function will
12984 not affect stack offsets.
12986 If the arg pointer is live (or if it was live, but has now been
12987 eliminated and so set to dead) then we do have to test to see if
12988 the function might contain a far jump. This test can lead to some
12989 false negatives, since before reload is completed the length of
12990 branch instructions is not known, so gcc defaults to returning their
12991 longest length, which in turn sets the far jump attribute to true.
12993 A false negative will not result in bad code being generated, but it
12994 will result in a needless push and pop of the link register. We
12995 hope that this does not occur too often.
12997 If we need doubleword stack alignment this could affect the other
12998 elimination offsets so we can't risk getting it wrong. */
12999 if (regs_ever_live [ARG_POINTER_REGNUM])
13000 cfun->machine->arg_pointer_live = 1;
13001 else if (!cfun->machine->arg_pointer_live)
13002 return 0;
13005 /* Check to see if the function contains a branch
13006 insn with the far jump attribute set. */
13007 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13009 if (GET_CODE (insn) == JUMP_INSN
13010 /* Ignore tablejump patterns. */
13011 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13012 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13013 && get_attr_far_jump (insn) == FAR_JUMP_YES
13016 /* Record the fact that we have decided that
13017 the function does use far jumps. */
13018 cfun->machine->far_jump_used = 1;
13019 return 1;
13023 return 0;
13026 /* Return nonzero if FUNC must be entered in ARM mode. */
13027 int
13028 is_called_in_ARM_mode (tree func)
13030 if (TREE_CODE (func) != FUNCTION_DECL)
13031 abort ();
13033 /* Ignore the problem about functions whose address is taken. */
13034 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13035 return TRUE;
13037 #ifdef ARM_PE
13038 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13039 #else
13040 return FALSE;
13041 #endif
13044 /* The bits which aren't usefully expanded as rtl. */
13045 const char *
13046 thumb_unexpanded_epilogue (void)
13048 int regno;
13049 int live_regs_mask = 0;
13050 int high_regs_pushed = 0;
13051 int had_to_push_lr;
13053 if (return_used_this_function)
13054 return "";
13056 if (IS_NAKED (arm_current_func_type ()))
13057 return "";
13059 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13060 if (THUMB_REG_PUSHED_P (regno))
13061 live_regs_mask |= 1 << regno;
13063 for (regno = 8; regno < 13; regno++)
13064 if (THUMB_REG_PUSHED_P (regno))
13065 high_regs_pushed++;
13067 /* The prologue may have pushed some high registers to use as
13068 work registers, e.g. the testsuite file:
13069 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13070 compiles to produce:
13071 push {r4, r5, r6, r7, lr}
13072 mov r7, r9
13073 mov r6, r8
13074 push {r6, r7}
13075 as part of the prologue. We have to undo that pushing here. */
13077 if (high_regs_pushed)
13079 int mask = live_regs_mask;
13080 int next_hi_reg;
13081 int size;
13082 int mode;
13084 /* We can deduce the registers used from the function's return value.
13085 This is more reliable than examining regs_ever_live[] because that
13086 will be set if the register is ever used in the function, not just if
13087 the register is used to hold a return value. */
13089 if (current_function_return_rtx != 0)
13090 mode = GET_MODE (current_function_return_rtx);
13091 else
13092 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13094 size = GET_MODE_SIZE (mode);
13096 /* Unless we are returning a type of size > 12, register r3 is
13097 available. */
13098 if (size < 13)
13099 mask |= 1 << 3;
13101 if (mask == 0)
13102 /* Oh dear! We have no low registers into which we can pop
13103 high registers! */
13104 internal_error
13105 ("no low registers available for popping high registers");
13107 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13108 if (THUMB_REG_PUSHED_P (next_hi_reg))
13109 break;
13111 while (high_regs_pushed)
13113 /* Find lo register(s) into which the high register(s) can
13114 be popped. */
13115 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13117 if (mask & (1 << regno))
13118 high_regs_pushed--;
13119 if (high_regs_pushed == 0)
13120 break;
13123 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13125 /* Pop the values into the low register(s). */
13126 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13128 /* Move the value(s) into the high registers. */
13129 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13131 if (mask & (1 << regno))
13133 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13134 regno);
13136 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13137 if (THUMB_REG_PUSHED_P (next_hi_reg))
13138 break;
13144 had_to_push_lr = (live_regs_mask || thumb_force_lr_save ());
13146 if (TARGET_BACKTRACE
13147 && ((live_regs_mask & 0xFF) == 0)
13148 && regs_ever_live [LAST_ARG_REGNUM] != 0)
13150 /* The stack backtrace structure creation code had to
13151 push R7 in order to get a work register, so we pop
13152 it now. */
13153 live_regs_mask |= (1 << LAST_LO_REGNUM);
13156 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13158 if (had_to_push_lr
13159 && !is_called_in_ARM_mode (current_function_decl))
13160 live_regs_mask |= 1 << PC_REGNUM;
13162 /* Either no argument registers were pushed or a backtrace
13163 structure was created which includes an adjusted stack
13164 pointer, so just pop everything. */
13165 if (live_regs_mask)
13166 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13167 live_regs_mask);
13169 /* We have either just popped the return address into the
13170 PC, or it was kept in LR for the entire function, or
13171 it is still on the stack because we do not want to
13172 return by doing a pop {pc}. */
13173 if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
13174 thumb_exit (asm_out_file,
13175 (had_to_push_lr
13176 && is_called_in_ARM_mode (current_function_decl)) ?
13177 -1 : LR_REGNUM);
13179 else
13181 /* Pop everything but the return address. */
13182 live_regs_mask &= ~(1 << PC_REGNUM);
13184 if (live_regs_mask)
13185 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13186 live_regs_mask);
13188 if (had_to_push_lr)
13189 /* Get the return address into a temporary register. */
13190 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13191 1 << LAST_ARG_REGNUM);
13193 /* Remove the argument registers that were pushed onto the stack. */
13194 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13195 SP_REGNUM, SP_REGNUM,
13196 current_function_pretend_args_size);
13198 thumb_exit (asm_out_file,
13199 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM);
13202 return "";
13205 /* Functions to save and restore machine-specific function data. */
13206 static struct machine_function *
13207 arm_init_machine_status (void)
13209 struct machine_function *machine;
13210 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13212 #if ARM_FT_UNKNOWN != 0
13213 machine->func_type = ARM_FT_UNKNOWN;
13214 #endif
13215 return machine;
13218 /* Return an RTX indicating where the return address to the
13219 calling function can be found. */
13220 rtx
13221 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13223 if (count != 0)
13224 return NULL_RTX;
13226 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13229 /* Do anything needed before RTL is emitted for each function. */
13230 void
13231 arm_init_expanders (void)
13233 /* Arrange to initialize and mark the machine per-function status. */
13234 init_machine_status = arm_init_machine_status;
13236 /* This is to stop the combine pass optimizing away the alignment
13237 adjustment of va_arg. */
13238 /* ??? It is claimed that this should not be necessary. */
13239 if (cfun)
13240 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13244 /* Like arm_compute_initial_elimination_offset. Simpler because
13245 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13247 HOST_WIDE_INT
13248 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13250 arm_stack_offsets *offsets;
13252 offsets = arm_get_frame_offsets ();
13254 switch (from)
13256 case ARG_POINTER_REGNUM:
13257 switch (to)
13259 case STACK_POINTER_REGNUM:
13260 return offsets->outgoing_args - offsets->saved_args;
13262 case FRAME_POINTER_REGNUM:
13263 return offsets->soft_frame - offsets->saved_args;
13265 case THUMB_HARD_FRAME_POINTER_REGNUM:
13266 case ARM_HARD_FRAME_POINTER_REGNUM:
13267 return offsets->saved_regs - offsets->saved_args;
13269 default:
13270 abort();
13272 break;
13274 case FRAME_POINTER_REGNUM:
13275 switch (to)
13277 case STACK_POINTER_REGNUM:
13278 return offsets->outgoing_args - offsets->soft_frame;
13280 case THUMB_HARD_FRAME_POINTER_REGNUM:
13281 case ARM_HARD_FRAME_POINTER_REGNUM:
13282 return offsets->saved_regs - offsets->soft_frame;
13284 default:
13285 abort();
13287 break;
13289 default:
13290 abort ();
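/* Editor's note: a worked example under assumed frame offsets
   (saved_args == 0, saved_regs == 16 for four pushed registers,
   soft_frame == 16, outgoing_args == 40).  Eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM then yields 40 - 0 == 40,
   to FRAME_POINTER_REGNUM yields 16 - 0 == 16, and to either hard frame
   pointer yields 16 - 0 == 16, matching the switch above.  */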
13295 /* Generate the rest of a function's prologue. */
13296 void
13297 thumb_expand_prologue (void)
13299 rtx insn, dwarf;
13301 HOST_WIDE_INT amount;
13302 arm_stack_offsets *offsets;
13303 unsigned long func_type;
13304 int regno;
13306 func_type = arm_current_func_type ();
13308 /* Naked functions don't have prologues. */
13309 if (IS_NAKED (func_type))
13310 return;
13312 if (IS_INTERRUPT (func_type))
13314 error ("interrupt Service Routines cannot be coded in Thumb mode");
13315 return;
13318 offsets = arm_get_frame_offsets ();
13320 if (frame_pointer_needed)
13322 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13323 stack_pointer_rtx));
13324 RTX_FRAME_RELATED_P (insn) = 1;
13327 amount = offsets->outgoing_args - offsets->saved_regs;
13328 if (amount)
13330 if (amount < 512)
13332 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13333 GEN_INT (- amount)));
13334 RTX_FRAME_RELATED_P (insn) = 1;
13336 else
13338 rtx reg;
13340 /* The stack decrement is too big for an immediate value in a single
13341 insn. In theory we could issue multiple subtracts, but after
13342 three of them it becomes more space efficient to place the full
13343 value in the constant pool and load into a register. (Also the
13344 ARM debugger really likes to see only one stack decrement per
13345 function). So instead we look for a scratch register into which
13346 we can load the decrement, and then we subtract this from the
13347 stack pointer. Unfortunately on the thumb the only available
13348 scratch registers are the argument registers, and we cannot use
13349 these as they may hold arguments to the function. Instead we
13350 attempt to locate a call preserved register which is used by this
13351 function. If we can find one, then we know that it will have
13352 been pushed at the start of the prologue and so we can corrupt
13353 it now. */
13354 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13355 if (THUMB_REG_PUSHED_P (regno)
13356 && !(frame_pointer_needed
13357 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13358 break;
13360 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13362 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13364 /* Choose an arbitrary, non-argument low register. */
13365 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13367 /* Save it by copying it into a high, scratch register. */
13368 emit_insn (gen_movsi (spare, reg));
13369 /* Add a USE to stop propagate_one_insn() from barfing. */
13370 emit_insn (gen_prologue_use (spare));
13372 /* Decrement the stack. */
13373 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13374 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13375 stack_pointer_rtx, reg));
13376 RTX_FRAME_RELATED_P (insn) = 1;
13377 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13378 plus_constant (stack_pointer_rtx,
13379 -amount));
13380 RTX_FRAME_RELATED_P (dwarf) = 1;
13381 REG_NOTES (insn)
13382 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13383 REG_NOTES (insn));
13385 /* Restore the low register's original value. */
13386 emit_insn (gen_movsi (reg, spare));
13388 /* Emit a USE of the restored scratch register, so that flow
13389 analysis will not consider the restore redundant. The
13390 register won't be used again in this function and isn't
13391 restored by the epilogue. */
13392 emit_insn (gen_prologue_use (reg));
13394 else
13396 reg = gen_rtx_REG (SImode, regno);
13398 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13400 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13401 stack_pointer_rtx, reg));
13402 RTX_FRAME_RELATED_P (insn) = 1;
13403 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13404 plus_constant (stack_pointer_rtx,
13405 -amount));
13406 RTX_FRAME_RELATED_P (dwarf) = 1;
13407 REG_NOTES (insn)
13408 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13409 REG_NOTES (insn));
13412 /* If the frame pointer is needed, emit a special barrier that
13413 will prevent the scheduler from moving stores to the frame
13414 before the stack adjustment. */
13415 if (frame_pointer_needed)
13416 emit_insn (gen_stack_tie (stack_pointer_rtx,
13417 hard_frame_pointer_rtx));
13420 if (current_function_profile || TARGET_NO_SCHED_PRO)
13421 emit_insn (gen_blockage ());
13423 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13424 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13426 if (THUMB_REG_PUSHED_P (regno))
13428 cfun->machine->lr_save_eliminated = 0;
13429 break;
13433 /* If the link register is being kept alive, with the return address in it,
13434 then make sure that it does not get reused by the ce2 pass. */
13435 if (cfun->machine->lr_save_eliminated)
13436 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
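/* Editor's note: an illustrative sketch of the large-decrement path
   above, with the emitted instructions an assumption about how the RTL
   prints rather than verified output.  For AMOUNT == 1024 with r4
   already pushed, the sequence corresponds roughly to

       ldr     r4, .Lpool      @ gen_movsi loads the constant -1024
       add     sp, sp, r4

   plus a REG_FRAME_RELATED_EXPR note recording sp = sp - 1024, so the
   dwarf2 unwinder sees a single stack decrement.  */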
13439 void
13440 thumb_expand_epilogue (void)
13442 HOST_WIDE_INT amount;
13443 arm_stack_offsets *offsets;
13444 int regno;
13446 /* Naked functions don't have epilogues. */
13447 if (IS_NAKED (arm_current_func_type ()))
13448 return;
13450 offsets = arm_get_frame_offsets ();
13451 amount = offsets->outgoing_args - offsets->saved_regs;
13453 if (frame_pointer_needed)
13454 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13455 else if (amount)
13457 if (amount < 512)
13458 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13459 GEN_INT (amount)));
13460 else
13462 /* r3 is always free in the epilogue. */
13463 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13465 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13466 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13470 /* Emit a USE (stack_pointer_rtx), so that
13471 the stack adjustment will not be deleted. */
13472 emit_insn (gen_prologue_use (stack_pointer_rtx));
13474 if (current_function_profile || TARGET_NO_SCHED_PRO)
13475 emit_insn (gen_blockage ());
13477 /* Emit a clobber for each insn that will be restored in the epilogue,
13478 so that flow2 will get register lifetimes correct. */
13479 for (regno = 0; regno < 13; regno++)
13480 if (regs_ever_live[regno] && !call_used_regs[regno])
13481 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13483 if (! regs_ever_live[LR_REGNUM])
13484 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13487 static void
13488 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13490 int live_regs_mask = 0;
13491 int high_regs_pushed = 0;
13492 int cfa_offset = 0;
13493 int regno;
13495 if (IS_NAKED (arm_current_func_type ()))
13496 return;
13498 if (is_called_in_ARM_mode (current_function_decl))
13500 const char * name;
13502 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13503 abort ();
13504 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13505 abort ();
13506 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13508 /* Generate code sequence to switch us into Thumb mode. */
13509 /* The .code 32 directive has already been emitted by
13510 ASM_DECLARE_FUNCTION_NAME. */
13511 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13512 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13514 /* Generate a label, so that the debugger will notice the
13515 change in instruction sets. This label is also used by
13516 the assembler to bypass the ARM code when this function
13517 is called from a Thumb encoded function elsewhere in the
13518 same file. Hence the definition of STUB_NAME here must
13519 agree with the definition in gas/config/tc-arm.c. */
13521 #define STUB_NAME ".real_start_of"
13523 fprintf (f, "\t.code\t16\n");
13524 #ifdef ARM_PE
13525 if (arm_dllexport_name_p (name))
13526 name = arm_strip_name_encoding (name);
13527 #endif
13528 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13529 fprintf (f, "\t.thumb_func\n");
13530 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13533 if (current_function_pretend_args_size)
13535 if (cfun->machine->uses_anonymous_args)
13537 int num_pushes;
13539 fprintf (f, "\tpush\t{");
13541 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13543 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13544 regno <= LAST_ARG_REGNUM;
13545 regno++)
13546 asm_fprintf (f, "%r%s", regno,
13547 regno == LAST_ARG_REGNUM ? "" : ", ");
13549 fprintf (f, "}\n");
13551 else
13552 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13553 SP_REGNUM, SP_REGNUM,
13554 current_function_pretend_args_size);
13556 /* We don't need to record the stores for unwinding (would it
13557 help the debugger any if we did?), but record the change in
13558 the stack pointer. */
13559 if (dwarf2out_do_frame ())
13561 char *l = dwarf2out_cfi_label ();
13562 cfa_offset = cfa_offset + current_function_pretend_args_size;
13563 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13567 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13568 if (THUMB_REG_PUSHED_P (regno))
13569 live_regs_mask |= 1 << regno;
13571 if (live_regs_mask || thumb_force_lr_save ())
13572 live_regs_mask |= 1 << LR_REGNUM;
13574 if (TARGET_BACKTRACE)
13576 int offset;
13577 int work_register = 0;
13578 int wr;
13580 /* We have been asked to create a stack backtrace structure.
13581 The code looks like this:
13583 0 .align 2
13584 0 func:
13585 0 sub SP, #16 Reserve space for 4 registers.
13586 2 push {R7} Get a work register.
13587 4 add R7, SP, #20 Get the stack pointer before the push.
13588 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13589 8 mov R7, PC Get hold of the start of this code plus 12.
13590 10 str R7, [SP, #16] Store it.
13591 12 mov R7, FP Get hold of the current frame pointer.
13592 14 str R7, [SP, #4] Store it.
13593 16 mov R7, LR Get hold of the current return address.
13594 18 str R7, [SP, #12] Store it.
13595 20 add R7, SP, #16 Point at the start of the backtrace structure.
13596 22 mov FP, R7 Put this value into the frame pointer. */
      if ((live_regs_mask & 0xFF) == 0)
        {
          /* See if the a4 register is free.  */

          if (regs_ever_live [LAST_ARG_REGNUM] == 0)
            work_register = LAST_ARG_REGNUM;
          else    /* We must push a register of our own.  */
            live_regs_mask |= (1 << LAST_LO_REGNUM);
        }

      if (work_register == 0)
        {
          /* Select a register from the list that will be pushed to
             use as our work register.  */
          for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
            if ((1 << work_register) & live_regs_mask)
              break;
        }

      asm_fprintf
        (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
         SP_REGNUM, SP_REGNUM);

      if (dwarf2out_do_frame ())
        {
          char *l = dwarf2out_cfi_label ();
          cfa_offset = cfa_offset + 16;
          dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
        }

      if (live_regs_mask)
        thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);

      for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
        if (wr & live_regs_mask)
          offset += 4;

      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
                   offset + 16 + current_function_pretend_args_size);

      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                   offset + 4);

      /* Make sure that the instruction fetching the PC is in the right place
         to calculate "start of backtrace creation code + 12".  */
      if (live_regs_mask)
        {
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                       offset + 12);
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
                       ARM_HARD_FRAME_POINTER_REGNUM);
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                       offset);
        }
      else
        {
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
                       ARM_HARD_FRAME_POINTER_REGNUM);
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                       offset);
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                       offset + 12);
        }

      asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
                   offset + 8);
      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
                   offset + 12);
      asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
                   ARM_HARD_FRAME_POINTER_REGNUM, work_register);
    }
  else if (live_regs_mask)
    thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
  for (regno = 8; regno < 13; regno++)
    if (THUMB_REG_PUSHED_P (regno))
      high_regs_pushed++;

  if (high_regs_pushed)
    {
      int pushable_regs = 0;
      int mask = live_regs_mask & 0xff;
      int next_hi_reg;

      for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
        if (THUMB_REG_PUSHED_P (next_hi_reg))
          break;

      pushable_regs = mask;

      if (pushable_regs == 0)
        {
          /* Desperation time -- this probably will never happen.  */
          if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
            asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
          mask = 1 << LAST_ARG_REGNUM;
        }
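      /* The loop below copies each live high register into one of the
         pushable low registers and then pushes those low registers.
         For example (a sketch), if r8 and r9 are live and r4/r5 are
         pushable, it emits:

             mov  r5, r9
             mov  r4, r8
             push {r4, r5}

         while recording, via the real_regs_mask argument, that r8 and
         r9 are the registers actually being saved.  */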
      while (high_regs_pushed > 0)
        {
          int real_regs_mask = 0;

          for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
            {
              if (mask & (1 << regno))
                {
                  asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);

                  high_regs_pushed--;
                  real_regs_mask |= (1 << next_hi_reg);

                  if (high_regs_pushed)
                    {
                      for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
                           next_hi_reg--)
                        if (THUMB_REG_PUSHED_P (next_hi_reg))
                          break;
                    }
                  else
                    {
                      mask &= ~((1 << regno) - 1);
                      break;
                    }
                }
            }

          thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
        }

      if (pushable_regs == 0
          && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
        asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
    }
}
/* Handle the case of a double word load into a low register from
   a computed memory address.  The computed address may involve a
   register which is overwritten by the load.  */
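/* For example (a sketch), loading the pair r0/r1 from an address held
   in r0 itself must fetch the high word first, so that the base is not
   clobbered before it is used:

        ldr     r1, [r0, #4]    @ high word while the base is intact
        ldr     r0, [r0]        @ low word overwrites the base last  */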
const char *
thumb_load_double_from_address (rtx *operands)
{
  rtx addr;
  rtx base;
  rtx offset;
  rtx arg1;
  rtx arg2;

  if (GET_CODE (operands[0]) != REG)
    abort ();

  if (GET_CODE (operands[1]) != MEM)
    abort ();

  /* Get the memory address.  */
  addr = XEXP (operands[1], 0);

  /* Work out how the memory address is computed.  */
  switch (GET_CODE (addr))
    {
    case REG:
      operands[2] = gen_rtx_MEM (SImode,
                                 plus_constant (XEXP (operands[1], 0), 4));

      if (REGNO (operands[0]) == REGNO (addr))
        {
          output_asm_insn ("ldr\t%H0, %2", operands);
          output_asm_insn ("ldr\t%0, %1", operands);
        }
      else
        {
          output_asm_insn ("ldr\t%0, %1", operands);
          output_asm_insn ("ldr\t%H0, %2", operands);
        }
      break;

    case CONST:
      /* Compute <address> + 4 for the high order load.  */
      operands[2] = gen_rtx_MEM (SImode,
                                 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%0, %1", operands);
      output_asm_insn ("ldr\t%H0, %2", operands);
      break;

    case PLUS:
      arg1 = XEXP (addr, 0);
      arg2 = XEXP (addr, 1);

      if (CONSTANT_P (arg1))
        base = arg2, offset = arg1;
      else
        base = arg1, offset = arg2;

      if (GET_CODE (base) != REG)
        abort ();

      /* Catch the case of <address> = <reg> + <reg>.  */
      if (GET_CODE (offset) == REG)
        {
          int reg_offset = REGNO (offset);
          int reg_base = REGNO (base);
          int reg_dest = REGNO (operands[0]);

          /* Add the base and offset registers together into the
             higher destination register.  */
          asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
                       reg_dest + 1, reg_base, reg_offset);

          /* Load the lower destination register from the address in
             the higher destination register.  */
          asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
                       reg_dest, reg_dest + 1);

          /* Load the higher destination register from its own address
             plus 4.  */
          asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
                       reg_dest + 1, reg_dest + 1);
        }
      else
        {
          /* Compute <address> + 4 for the high order load.  */
          operands[2] = gen_rtx_MEM (SImode,
                                     plus_constant (XEXP (operands[1], 0), 4));

          /* If the computed address is held in the low order register
             then load the high order register first, otherwise always
             load the low order register first.  */
          if (REGNO (operands[0]) == REGNO (base))
            {
              output_asm_insn ("ldr\t%H0, %2", operands);
              output_asm_insn ("ldr\t%0, %1", operands);
            }
          else
            {
              output_asm_insn ("ldr\t%0, %1", operands);
              output_asm_insn ("ldr\t%H0, %2", operands);
            }
        }
      break;

    case LABEL_REF:
      /* With no registers to worry about we can just load the value
         directly.  */
      operands[2] = gen_rtx_MEM (SImode,
                                 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%H0, %2", operands);
      output_asm_insn ("ldr\t%0, %1", operands);
      break;

    default:
      abort ();
      break;
    }

  return "";
}
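/* Output a 2- or 3-register block move.  The scratch registers are
   sorted into ascending order first, since ldmia/stmia register lists
   must name registers in ascending order.  */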
const char *
thumb_output_move_mem_multiple (int n, rtx *operands)
{
  rtx tmp;

  switch (n)
    {
    case 2:
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }
      output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
      break;

    case 3:
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }
      if (REGNO (operands[5]) > REGNO (operands[6]))
        {
          tmp = operands[5];
          operands[5] = operands[6];
          operands[6] = tmp;
        }
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }

      output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
      break;

    default:
      abort ();
    }

  return "";
}
/* Routines for generating rtl.  */
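/* Expand a constant-length block copy into moves of decreasing size.
   For example (a sketch), a 23-byte copy becomes one 12-byte block
   move, one 8-byte block move, then a halfword and a byte copy for
   the 3-byte tail.  */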
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;

  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
                            reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
                            reg));
    }
}
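/* Thumb CMP accepts a register or an unsigned 8-bit immediate, so a
   comparison operand must be a register or a constant in [0, 255].  */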
int
thumb_cmp_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT
           && INTVAL (op) < 256
           && INTVAL (op) >= 0)
          || s_register_operand (op, mode));
}
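/* A constant in [-255, -1] cannot be used with CMP directly, but the
   matching insn patterns (presumably) compare by operating on the
   negated, positive value instead, so accept such constants here.  */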
int
thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) < 0
          && INTVAL (op) > -256);
}

/* Return TRUE if a result can be stored in OP without clobbering the
   condition code register.  Prior to reload we only accept a
   register.  After reload we have to be able to handle memory as
   well, since a pseudo may not get a hard reg and reload cannot
   handle output-reloads on jump insns.

   We could possibly handle mem before reload as well, but that might
   complicate things with the need to handle increment
   side-effects.  */
int
thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
{
  return (s_register_operand (op, mode)
          || ((reload_in_progress || reload_completed)
              && memory_operand (op, mode)));
}
/* Handle storing a half-word to memory during reload.  */
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}
/* Return the length of a function name prefix
   that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
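/* For example, the first three distinct symbols entered are assigned
   the addresses x$adcons+0, x$adcons+4 and x$adcons+8; entering a
   symbol a second time returns its existing slot.  */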
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
               PIC_OFFSET_TABLE_REGNUM,
               PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}

int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
           arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
        {
          *old = (*old)->next;
          return;
        }
    }
}

int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}

static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
#ifdef OBJECT_FORMAT_ELF
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.

   Differs from the default elf version only in the prefix character
   used before the section type.  */
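/* For example (a sketch), a writable data section comes out as

        .section .data.foo,"aw",%progbits

   where the generic ELF hook would have written "@progbits"; '@'
   introduces a comment in ARM assembler syntax.  */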
static void
arm_elf_asm_named_section (const char *name, unsigned int flags)
{
  char flagchars[10], *f = flagchars;

  if (! named_section_first_declaration (name))
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  if (!(flags & SECTION_DEBUG))
    *f++ = 'a';
  if (flags & SECTION_WRITE)
    *f++ = 'w';
  if (flags & SECTION_CODE)
    *f++ = 'x';
  if (flags & SECTION_SMALL)
    *f++ = 's';
  if (flags & SECTION_MERGE)
    *f++ = 'M';
  if (flags & SECTION_STRINGS)
    *f++ = 'S';
  if (flags & SECTION_TLS)
    *f++ = 'T';
  *f = '\0';

  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);

  if (!(flags & SECTION_NOTYPE))
    {
      const char *type;

      if (flags & SECTION_BSS)
        type = "nobits";
      else
        type = "progbits";

      fprintf (asm_out_file, ",%%%s", type);

      if (flags & SECTION_ENTSIZE)
        fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
    }

  putc ('\n', asm_out_file);
}
#endif
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
        arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
        arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
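/* The adjustment loop below splits DELTA into ARM shifter-immediate
   chunks, eight bits at a time at even shift positions.  For example
   (a sketch), a delta of 0x1004 with `this' in r0 becomes:

        add     r0, r0, #4
        add     r0, r0, #4096  */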
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                     HOST_WIDE_INT delta,
                     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
                    ? 1 : 0);
  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
        shift += 2;
      else
        {
          asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
                       mi_op, this_regno, this_regno,
                       mi_delta & (0xff << shift));
          mi_delta &= ~(0xff << shift);
          shift += 8;
        }
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
        fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
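/* Emit a CONST_VECTOR as one hexadecimal immediate, most significant
   element first.  For example, the V4HImode vector {1, 2, 3, 4} is
   printed as 0x0004000300020001.  */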
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
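/* Output an iWMMXt wldrw, working around the limited reach of its
   addressing mode: for offsets at or beyond +/-1024, the value is
   fetched by spilling the base register, loading the value with an
   integer ldr, transferring it with tmcr, and restoring the base.  */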
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */
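/* For example, if one of the four argument registers has been consumed
   by earlier named arguments, *pretend_size becomes 3 * UNITS_PER_WORD,
   so the prologue pushes r1-r3 into the pretend-args area.  */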
static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
                            enum machine_mode mode ATTRIBUTE_UNUSED,
                            tree type ATTRIBUTE_UNUSED,
                            int *pretend_size,
                            int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */
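/* This and the checkers below first peel any COND_EXEC and PARALLEL
   wrappers off both patterns; e.g. for a producer (set (reg A) ...)
   and a consumer (set (mem ADDR) ...), the store has an early
   dependency on A only if A appears within ADDR.  */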
int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
          && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}

/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}

/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}

/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}

/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_2 (8, 0);
  TREE_TYPE (size) = sizetype;
  return size;
}

/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}
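/* Store SOURCE into the slot holding the current function's saved
   return address.  SCRATCH is used to build the address when the
   offset exceeds the 4095-byte reach of an ARM load/store
   displacement.  */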
void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
        addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
        {
          /* LR will be the first saved register.  */
          offsets = arm_get_frame_offsets ();
          delta = offsets->outgoing_args - (offsets->frame + 4);

          if (delta >= 4096)
            {
              emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
                                     GEN_INT (delta & ~4095)));
              addr = scratch;
              delta &= 4095;
            }
          else
            addr = stack_pointer_rtx;

          addr = plus_constant (addr, delta);
        }

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
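/* Thumb counterpart of the above.  Thumb-1 load/store offsets are far
   smaller (at most 124 from an ordinary base register, 1020 from SP),
   hence the tighter limits before falling back to SCRATCH.  */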
void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  bool lr_saved;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;

  emit_insn (gen_rtx_USE (VOIDmode, source));
  lr_saved = FALSE;
  for (reg = 0; reg <= LAST_LO_REGNUM; reg++)
    {
      if (THUMB_REG_PUSHED_P (reg))
        {
          lr_saved = TRUE;
          break;
        }
    }
  lr_saved |= thumb_force_lr_save ();

  if (lr_saved)
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
        {
          delta = offsets->soft_frame - offsets->saved_args;
          reg = THUMB_HARD_FRAME_POINTER_REGNUM;
        }
      else
        {
          delta = offsets->outgoing_args - offsets->saved_args;
          reg = SP_REGNUM;
        }
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
        delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
          || delta >= 1024)
        {
          emit_insn (gen_movsi (scratch, GEN_INT (delta)));
          emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
          addr = scratch;
        }
      else
        addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}