/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned long thumb_compute_save_reg_mask (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int);
static void thumb_pushpop (FILE *, int, int, int *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static void arm_init_libfuncs (void);

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, ARCH, FLAGS, COSTS) \
  {#NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

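/* As an illustration, a hypothetical arm-cores.def entry such as

       ARM_CORE(arm7tdmi, 4T, FL_CO_PROC, slowmul)

   would expand under the macro above to

       {"arm7tdmi", arm_none, "4T", FL_CO_PROC | FL_FOR_ARCH4T,
        arm_slowmul_rtx_costs},

   so each core is tagged with its architecture's feature flags and a
   cost model; see arm-cores.def for the real entries.  */
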
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",      FPUTYPE_FPA},
  {"fpe2",     FPUTYPE_FPA_EMU2},
  {"fpe3",     FPUTYPE_FPA_EMU3},
  {"maverick", FPUTYPE_MAVERICK},
  {"vfp",      FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",    ARM_FLOAT_ABI_SOFT},
  {"softfp",  ARM_FLOAT_ABI_SOFTFP},
  {"hard",    ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}

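/* For example, bit_count (0x29) returns 3: the loop clears bit 0,
   then bit 3, then bit 5, taking one iteration per set bit rather
   than one per bit position.  */
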
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");
}

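/* For reference, the Run-Time ABI declares the 32-bit divmod helper
   registered above along the lines of

       typedef struct { int quot; int rem; } idiv_return;
       idiv_return __aeabi_idivmod (int numerator, int denominator);

   with the quotient in r0 and the remainder in r1, which is why the
   same entry point can serve both sdiv_optab and sdivmod_optab.  */
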
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != 2)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~ARM_FLAG_INTERWORK;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
    sorry ("-mfloat-abi=softfp");
  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init(&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

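/* So, for example, a handler declared as

       void handler (void) __attribute__ ((interrupt ("FIQ")));

   matches the table above and yields ARM_FT_FIQ, while a misspelt
   argument string falls through to ARM_FT_UNKNOWN.  */
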
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
        (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}

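/* The loop above implements the ARM immediate encoding: any 8-bit
   value rotated right by an even amount within the 32-bit word.  So,
   for example, 0xff and 0xff00 are valid immediates, while 0x101 is
   not and must be synthesized from two instructions.  */
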
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode,
                                                        source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}

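/* For example, a (set reg 0xffff) cannot be encoded as a single
   immediate, so it is synthesized as roughly

       mov  rN, #0xff00
       orr  rN, rN, #0x00ff

   and the arm_constant_limit check above decides when such an inline
   sequence is still cheaper than loading from a literal pool.  */
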
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

1564 /* As above, but extra parameter GENERATE which, if clear, suppresses
1565 RTL generation. */
1567 static int
1568 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1569 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1570 int generate)
1572 int can_invert = 0;
1573 int can_negate = 0;
1574 int can_negate_initial = 0;
1575 int can_shift = 0;
1576 int i;
1577 int num_bits_set = 0;
1578 int set_sign_bit_copies = 0;
1579 int clear_sign_bit_copies = 0;
1580 int clear_zero_bit_copies = 0;
1581 int set_zero_bit_copies = 0;
1582 int insns = 0;
1583 unsigned HOST_WIDE_INT temp1, temp2;
1584 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1586 /* Find out which operations are safe for a given CODE. Also do a quick
1587 check for degenerate cases; these can occur when DImode operations
1588 are split. */
1589 switch (code)
1591 case SET:
1592 can_invert = 1;
1593 can_shift = 1;
1594 can_negate = 1;
1595 break;
1597 case PLUS:
1598 can_negate = 1;
1599 can_negate_initial = 1;
1600 break;
1602 case IOR:
1603 if (remainder == 0xffffffff)
1605 if (generate)
1606 emit_constant_insn (cond,
1607 gen_rtx_SET (VOIDmode, target,
1608 GEN_INT (ARM_SIGN_EXTEND (val))));
1609 return 1;
1611 if (remainder == 0)
1613 if (reload_completed && rtx_equal_p (target, source))
1614 return 0;
1615 if (generate)
1616 emit_constant_insn (cond,
1617 gen_rtx_SET (VOIDmode, target, source));
1618 return 1;
1620 break;
1622 case AND:
1623 if (remainder == 0)
1625 if (generate)
1626 emit_constant_insn (cond,
1627 gen_rtx_SET (VOIDmode, target, const0_rtx));
1628 return 1;
1630 if (remainder == 0xffffffff)
1632 if (reload_completed && rtx_equal_p (target, source))
1633 return 0;
1634 if (generate)
1635 emit_constant_insn (cond,
1636 gen_rtx_SET (VOIDmode, target, source));
1637 return 1;
1639 can_invert = 1;
1640 break;
1642 case XOR:
1643 if (remainder == 0)
1645 if (reload_completed && rtx_equal_p (target, source))
1646 return 0;
1647 if (generate)
1648 emit_constant_insn (cond,
1649 gen_rtx_SET (VOIDmode, target, source));
1650 return 1;
1652 if (remainder == 0xffffffff)
1654 if (generate)
1655 emit_constant_insn (cond,
1656 gen_rtx_SET (VOIDmode, target,
1657 gen_rtx_NOT (mode, source)));
1658 return 1;
1661 /* We don't know how to handle this yet below. */
1662 abort ();
1664 case MINUS:
1665 /* We treat MINUS as (val - source), since (source - val) is always
1666 passed as (source + (-val)). */
1667 if (remainder == 0)
1669 if (generate)
1670 emit_constant_insn (cond,
1671 gen_rtx_SET (VOIDmode, target,
1672 gen_rtx_NEG (mode, source)));
1673 return 1;
1675 if (const_ok_for_arm (val))
1677 if (generate)
1678 emit_constant_insn (cond,
1679 gen_rtx_SET (VOIDmode, target,
1680 gen_rtx_MINUS (mode, GEN_INT (val),
1681 source)));
1682 return 1;
1684 can_negate = 1;
1686 break;
1688 default:
1689 abort ();
1692 /* If we can do it in one insn get out quickly. */
1693 if (const_ok_for_arm (val)
1694 || (can_negate_initial && const_ok_for_arm (-val))
1695 || (can_invert && const_ok_for_arm (~val)))
1697 if (generate)
1698 emit_constant_insn (cond,
1699 gen_rtx_SET (VOIDmode, target,
1700 (source
1701 ? gen_rtx_fmt_ee (code, mode, source,
1702 GEN_INT (val))
1703 : GEN_INT (val))));
1704 return 1;
1707 /* Calculate a few attributes that may be useful for specific
1708 optimizations. */
1709 for (i = 31; i >= 0; i--)
1711 if ((remainder & (1 << i)) == 0)
1712 clear_sign_bit_copies++;
1713 else
1714 break;
1717 for (i = 31; i >= 0; i--)
1719 if ((remainder & (1 << i)) != 0)
1720 set_sign_bit_copies++;
1721 else
1722 break;
1725 for (i = 0; i <= 31; i++)
1727 if ((remainder & (1 << i)) == 0)
1728 clear_zero_bit_copies++;
1729 else
1730 break;
1733 for (i = 0; i <= 31; i++)
1735 if ((remainder & (1 << i)) != 0)
1736 set_zero_bit_copies++;
1737 else
1738 break;
1741 switch (code)
1743 case SET:
1744 /* See if we can do this by sign_extending a constant that is known
1745 to be negative. This is a good, way of doing it, since the shift
1746 may well merge into a subsequent insn. */
1747 if (set_sign_bit_copies > 1)
1749 if (const_ok_for_arm
1750 (temp1 = ARM_SIGN_EXTEND (remainder
1751 << (set_sign_bit_copies - 1))))
1753 if (generate)
1755 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1756 emit_constant_insn (cond,
1757 gen_rtx_SET (VOIDmode, new_src,
1758 GEN_INT (temp1)));
1759 emit_constant_insn (cond,
1760 gen_ashrsi3 (target, new_src,
1761 GEN_INT (set_sign_bit_copies - 1)));
1763 return 2;
1765 /* For an inverted constant, we will need to set the low bits,
1766 these will be shifted out of harm's way. */
1767 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1768 if (const_ok_for_arm (~temp1))
1770 if (generate)
1772 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1773 emit_constant_insn (cond,
1774 gen_rtx_SET (VOIDmode, new_src,
1775 GEN_INT (temp1)));
1776 emit_constant_insn (cond,
1777 gen_ashrsi3 (target, new_src,
1778 GEN_INT (set_sign_bit_copies - 1)));
1780 return 2;
1784 /* See if we can generate this by setting the bottom (or the top)
1785 16 bits, and then shifting these into the other half of the
1786	 word.  We only look for the simplest cases; to do more would cost
1787 too much. Be careful, however, not to generate this when the
1788 alternative would take fewer insns. */
1789 if (val & 0xffff0000)
1791 temp1 = remainder & 0xffff0000;
1792 temp2 = remainder & 0x0000ffff;
1794 /* Overlaps outside this range are best done using other methods. */
1795 for (i = 9; i < 24; i++)
1797 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1798 && !const_ok_for_arm (temp2))
1800 rtx new_src = (subtargets
1801 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1802 : target);
1803 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1804 source, subtargets, generate);
1805 source = new_src;
1806 if (generate)
1807 emit_constant_insn
1808 (cond,
1809 gen_rtx_SET
1810 (VOIDmode, target,
1811 gen_rtx_IOR (mode,
1812 gen_rtx_ASHIFT (mode, source,
1813 GEN_INT (i)),
1814 source)));
1815 return insns + 1;
1819 /* Don't duplicate cases already considered. */
1820 for (i = 17; i < 24; i++)
1822 if (((temp1 | (temp1 >> i)) == remainder)
1823 && !const_ok_for_arm (temp1))
1825 rtx new_src = (subtargets
1826 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1827 : target);
1828 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1829 source, subtargets, generate);
1830 source = new_src;
1831 if (generate)
1832 emit_constant_insn
1833 (cond,
1834 gen_rtx_SET (VOIDmode, target,
1835 gen_rtx_IOR
1836 (mode,
1837 gen_rtx_LSHIFTRT (mode, source,
1838 GEN_INT (i)),
1839 source)));
1840 return insns + 1;
1844 break;
1846 case IOR:
1847 case XOR:
1848 /* If we have IOR or XOR, and the constant can be loaded in a
1849 single instruction, and we can find a temporary to put it in,
1850 then this can be done in two instructions instead of 3-4. */
1851 if (subtargets
1852	  /* TARGET can't be NULL if SUBTARGETS is 0.  */
1853 || (reload_completed && !reg_mentioned_p (target, source)))
1855 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1857 if (generate)
1859 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1861 emit_constant_insn (cond,
1862 gen_rtx_SET (VOIDmode, sub,
1863 GEN_INT (val)));
1864 emit_constant_insn (cond,
1865 gen_rtx_SET (VOIDmode, target,
1866 gen_rtx_fmt_ee (code, mode,
1867 source, sub)));
1869 return 2;
1873 if (code == XOR)
1874 break;
1876 if (set_sign_bit_copies > 8
1877 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1879 if (generate)
1881 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1882 rtx shift = GEN_INT (set_sign_bit_copies);
1884 emit_constant_insn
1885 (cond,
1886 gen_rtx_SET (VOIDmode, sub,
1887 gen_rtx_NOT (mode,
1888 gen_rtx_ASHIFT (mode,
1889 source,
1890 shift))));
1891 emit_constant_insn
1892 (cond,
1893 gen_rtx_SET (VOIDmode, target,
1894 gen_rtx_NOT (mode,
1895 gen_rtx_LSHIFTRT (mode, sub,
1896 shift))));
1898 return 2;
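      /* (Illustratively: for val = 0xfff00000 the above emits

	     mvn	sub, source, lsl #12
	     mvn	target, sub, lsr #12

	 which ORs in the twelve leading one bits in two insns; register
	 names here are placeholders.)  */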
1901 if (set_zero_bit_copies > 8
1902 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1904 if (generate)
1906 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1907 rtx shift = GEN_INT (set_zero_bit_copies);
1909 emit_constant_insn
1910 (cond,
1911 gen_rtx_SET (VOIDmode, sub,
1912 gen_rtx_NOT (mode,
1913 gen_rtx_LSHIFTRT (mode,
1914 source,
1915 shift))));
1916 emit_constant_insn
1917 (cond,
1918 gen_rtx_SET (VOIDmode, target,
1919 gen_rtx_NOT (mode,
1920 gen_rtx_ASHIFT (mode, sub,
1921 shift))));
1923 return 2;
1926 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1928 if (generate)
1930 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1931 emit_constant_insn (cond,
1932 gen_rtx_SET (VOIDmode, sub,
1933 gen_rtx_NOT (mode, source)));
1934 source = sub;
1935 if (subtargets)
1936 sub = gen_reg_rtx (mode);
1937 emit_constant_insn (cond,
1938 gen_rtx_SET (VOIDmode, sub,
1939 gen_rtx_AND (mode, source,
1940 GEN_INT (temp1))));
1941 emit_constant_insn (cond,
1942 gen_rtx_SET (VOIDmode, target,
1943 gen_rtx_NOT (mode, sub)));
1945 return 3;
1947 break;
1949 case AND:
1950	  /* See if two shifts will do 2 or more insns' worth of work.  */
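      /* (A sketch of the idea: AND with 0xffff is not a valid ARM
	 immediate, but

	     mov	tmp, src, lsl #16
	     mov	dst, tmp, lsr #16

	 clears the top 16 bits in two insns; tmp/dst/src are
	 illustrative names.)  */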
1951 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1953 HOST_WIDE_INT shift_mask = ((0xffffffff
1954 << (32 - clear_sign_bit_copies))
1955 & 0xffffffff);
1957 if ((remainder | shift_mask) != 0xffffffff)
1959 if (generate)
1961 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1962 insns = arm_gen_constant (AND, mode, cond,
1963 remainder | shift_mask,
1964 new_src, source, subtargets, 1);
1965 source = new_src;
1967 else
1969 rtx targ = subtargets ? NULL_RTX : target;
1970 insns = arm_gen_constant (AND, mode, cond,
1971 remainder | shift_mask,
1972 targ, source, subtargets, 0);
1976 if (generate)
1978 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1979 rtx shift = GEN_INT (clear_sign_bit_copies);
1981 emit_insn (gen_ashlsi3 (new_src, source, shift));
1982 emit_insn (gen_lshrsi3 (target, new_src, shift));
1985 return insns + 2;
1988 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1990 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1992 if ((remainder | shift_mask) != 0xffffffff)
1994 if (generate)
1996 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1998 insns = arm_gen_constant (AND, mode, cond,
1999 remainder | shift_mask,
2000 new_src, source, subtargets, 1);
2001 source = new_src;
2003 else
2005 rtx targ = subtargets ? NULL_RTX : target;
2007 insns = arm_gen_constant (AND, mode, cond,
2008 remainder | shift_mask,
2009 targ, source, subtargets, 0);
2013 if (generate)
2015 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2016 rtx shift = GEN_INT (clear_zero_bit_copies);
2018 emit_insn (gen_lshrsi3 (new_src, source, shift));
2019 emit_insn (gen_ashlsi3 (target, new_src, shift));
2022 return insns + 2;
2025 break;
2027 default:
2028 break;
2031 for (i = 0; i < 32; i++)
2032 if (remainder & (1 << i))
2033 num_bits_set++;
2035 if (code == AND || (can_invert && num_bits_set > 16))
2036 remainder = (~remainder) & 0xffffffff;
2037 else if (code == PLUS && num_bits_set > 16)
2038 remainder = (-remainder) & 0xffffffff;
2039 else
2041 can_invert = 0;
2042 can_negate = 0;
2045 /* Now try and find a way of doing the job in either two or three
2046 instructions.
2047 We start by looking for the largest block of zeros that are aligned on
2048	     a 2-bit boundary; we then fill up the temps, wrapping around to the
2049 top of the word when we drop off the bottom.
2050 In the worst case this code should produce no more than four insns. */
2052 int best_start = 0;
2053 int best_consecutive_zeros = 0;
2055 for (i = 0; i < 32; i += 2)
2057 int consecutive_zeros = 0;
2059 if (!(remainder & (3 << i)))
2061 while ((i < 32) && !(remainder & (3 << i)))
2063 consecutive_zeros += 2;
2064 i += 2;
2066 if (consecutive_zeros > best_consecutive_zeros)
2068 best_consecutive_zeros = consecutive_zeros;
2069 best_start = i - consecutive_zeros;
2071 i -= 2;
2075 /* So long as it won't require any more insns to do so, it's
2076 desirable to emit a small constant (in bits 0...9) in the last
2077 insn. This way there is more chance that it can be combined with
2078 a later addressing insn to form a pre-indexed load or store
2079 operation. Consider:
2081 *((volatile int *)0xe0000100) = 1;
2082 *((volatile int *)0xe0000110) = 2;
2084 We want this to wind up as:
2086 mov rA, #0xe0000000
2087 mov rB, #1
2088 str rB, [rA, #0x100]
2089 mov rB, #2
2090 str rB, [rA, #0x110]
2092 rather than having to synthesize both large constants from scratch.
2094 Therefore, we calculate how many insns would be required to emit
2095 the constant starting from `best_start', and also starting from
2096	     zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
2097 yield a shorter sequence, we may as well use zero. */
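  /* (A worked example, illustrative only: synthesizing 0x12345678 can
     chunk the value into four rotated 8-bit immediates, giving one
     possible sequence

	 mov	rD, #0x12000000
	 orr	rD, rD, #0x00340000
	 orr	rD, rD, #0x00005600
	 orr	rD, rD, #0x00000078

     which is the four-insn worst case mentioned above.)  */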
2098 if (best_start != 0
2099 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2100 && (count_insns_for_constant (remainder, 0) <=
2101 count_insns_for_constant (remainder, best_start)))
2102 best_start = 0;
2104 /* Now start emitting the insns. */
2105 i = best_start;
2108 int end;
2110 if (i <= 0)
2111 i += 32;
2112 if (remainder & (3 << (i - 2)))
2114 end = i - 8;
2115 if (end < 0)
2116 end += 32;
2117 temp1 = remainder & ((0x0ff << end)
2118 | ((i < end) ? (0xff >> (32 - end)) : 0));
2119 remainder &= ~temp1;
2121 if (generate)
2123 rtx new_src, temp1_rtx;
2125 if (code == SET || code == MINUS)
2127 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2128 if (can_invert && code != MINUS)
2129 temp1 = ~temp1;
2131 else
2133 if (remainder && subtargets)
2134 new_src = gen_reg_rtx (mode);
2135 else
2136 new_src = target;
2137 if (can_invert)
2138 temp1 = ~temp1;
2139 else if (can_negate)
2140 temp1 = -temp1;
2143 temp1 = trunc_int_for_mode (temp1, mode);
2144 temp1_rtx = GEN_INT (temp1);
2146 if (code == SET)
2148 else if (code == MINUS)
2149 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2150 else
2151 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2153 emit_constant_insn (cond,
2154 gen_rtx_SET (VOIDmode, new_src,
2155 temp1_rtx));
2156 source = new_src;
2159 if (code == SET)
2161 can_invert = 0;
2162 code = PLUS;
2164 else if (code == MINUS)
2165 code = PLUS;
2167 insns++;
2168 i -= 6;
2170 i -= 2;
2172 while (remainder);
2175 return insns;
2178 /* Canonicalize a comparison so that we are more likely to recognize it.
2179 This can be done for a few constant compares, where we can make the
2180 immediate value easier to load. */
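/* (For example, illustratively: 0xfeff is not a valid ARM immediate,
   but rewriting (x > 0xfeff) as (x >= 0xff00) lets the comparison use
   0xff00, which is an 8-bit value rotated by an even amount.)  */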
2182 enum rtx_code
2183 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2185 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2187 switch (code)
2189 case EQ:
2190 case NE:
2191 return code;
2193 case GT:
2194 case LE:
2195 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2196 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2198 *op1 = GEN_INT (i + 1);
2199 return code == GT ? GE : LT;
2201 break;
2203 case GE:
2204 case LT:
2205 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2206 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2208 *op1 = GEN_INT (i - 1);
2209 return code == GE ? GT : LE;
2211 break;
2213 case GTU:
2214 case LEU:
2215 if (i != ~((unsigned HOST_WIDE_INT) 0)
2216 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2218 *op1 = GEN_INT (i + 1);
2219 return code == GTU ? GEU : LTU;
2221 break;
2223 case GEU:
2224 case LTU:
2225 if (i != 0
2226 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2228 *op1 = GEN_INT (i - 1);
2229 return code == GEU ? GTU : LEU;
2231 break;
2233 default:
2234 abort ();
2237 return code;
2241 /* Define how to find the value returned by a function. */
2243 rtx arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2245 enum machine_mode mode;
2246 int unsignedp ATTRIBUTE_UNUSED;
2247 rtx r ATTRIBUTE_UNUSED;
2250 mode = TYPE_MODE (type);
2251 /* Promote integer types. */
2252 if (INTEGRAL_TYPE_P (type))
2253 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2254   return LIBCALL_VALUE (mode);
2258 /* Decide whether a type should be returned in memory (true)
2259 or in a register (false). This is called by the macro
2260 RETURN_IN_MEMORY. */
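/* (Illustratively, under the APCS: struct { int i; } is integer-like
   and comes back in r0, while struct { float f; } or any aggregate
   larger than a word goes back in memory; under the ATPCS only the
   size test below applies.)  */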
2262 arm_return_in_memory (tree type)
2264 HOST_WIDE_INT size;
2266 if (!AGGREGATE_TYPE_P (type))
2267 /* All simple types are returned in registers. */
2268 return 0;
2270 size = int_size_in_bytes (type);
2272 if (arm_abi != ARM_ABI_APCS)
2274 /* ATPCS and later return aggregate types in memory only if they are
2275 larger than a word (or are variable size). */
2276 return (size < 0 || size > UNITS_PER_WORD);
2279 /* For the arm-wince targets we choose to be compatible with Microsoft's
2280 ARM and Thumb compilers, which always return aggregates in memory. */
2281 #ifndef ARM_WINCE
2282 /* All structures/unions bigger than one word are returned in memory.
2283 Also catch the case where int_size_in_bytes returns -1. In this case
2284 the aggregate is either huge or of variable size, and in either case
2285 we will want to return it via memory and not in a register. */
2286 if (size < 0 || size > UNITS_PER_WORD)
2287 return 1;
2289 if (TREE_CODE (type) == RECORD_TYPE)
2291 tree field;
2293 /* For a struct the APCS says that we only return in a register
2294 if the type is 'integer like' and every addressable element
2295 has an offset of zero. For practical purposes this means
2296	 that the structure can have at most one non-bit-field element
2297 and that this element must be the first one in the structure. */
2299 /* Find the first field, ignoring non FIELD_DECL things which will
2300 have been created by C++. */
2301 for (field = TYPE_FIELDS (type);
2302 field && TREE_CODE (field) != FIELD_DECL;
2303 field = TREE_CHAIN (field))
2304 continue;
2306 if (field == NULL)
2307 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2309 /* Check that the first field is valid for returning in a register. */
2311       /* ... Floats are not allowed.  */
2312 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2313 return 1;
2315 /* ... Aggregates that are not themselves valid for returning in
2316 a register are not allowed. */
2317 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2318 return 1;
2320 /* Now check the remaining fields, if any. Only bitfields are allowed,
2321 since they are not addressable. */
2322 for (field = TREE_CHAIN (field);
2323 field;
2324 field = TREE_CHAIN (field))
2326 if (TREE_CODE (field) != FIELD_DECL)
2327 continue;
2329 if (!DECL_BIT_FIELD_TYPE (field))
2330 return 1;
2333 return 0;
2336 if (TREE_CODE (type) == UNION_TYPE)
2338 tree field;
2340 /* Unions can be returned in registers if every element is
2341 integral, or can be returned in an integer register. */
2342 for (field = TYPE_FIELDS (type);
2343 field;
2344 field = TREE_CHAIN (field))
2346 if (TREE_CODE (field) != FIELD_DECL)
2347 continue;
2349 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2350 return 1;
2352 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2353 return 1;
2356 return 0;
2358 #endif /* not ARM_WINCE */
2360 /* Return all other types in memory. */
2361 return 1;
2364 /* Indicate whether or not words of a double are in big-endian order. */
2367 arm_float_words_big_endian (void)
2369 if (TARGET_MAVERICK)
2370 return 0;
2372   /* For FPA, float words are always big-endian.  For VFP, float words
2373 follow the memory system mode. */
2375 if (TARGET_FPA)
2377 return 1;
2380 if (TARGET_VFP)
2381 return (TARGET_BIG_END ? 1 : 0);
2383 return 1;
2386 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2387 for a call to a function whose data type is FNTYPE.
2388 For a library call, FNTYPE is NULL. */
2389 void
2390 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2391 rtx libname ATTRIBUTE_UNUSED,
2392 tree fndecl ATTRIBUTE_UNUSED)
2394 /* On the ARM, the offset starts at 0. */
2395 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2396 pcum->iwmmxt_nregs = 0;
2397 pcum->can_split = true;
2399 pcum->call_cookie = CALL_NORMAL;
2401 if (TARGET_LONG_CALLS)
2402 pcum->call_cookie = CALL_LONG;
2404 /* Check for long call/short call attributes. The attributes
2405 override any command line option. */
2406 if (fntype)
2408 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2409 pcum->call_cookie = CALL_SHORT;
2410 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2411 pcum->call_cookie = CALL_LONG;
2414 /* Varargs vectors are treated the same as long long.
2415      named_count avoids having to change the way arm handles 'named'.  */
2416 pcum->named_count = 0;
2417 pcum->nargs = 0;
2419 if (TARGET_REALLY_IWMMXT && fntype)
2421 tree fn_arg;
2423 for (fn_arg = TYPE_ARG_TYPES (fntype);
2424 fn_arg;
2425 fn_arg = TREE_CHAIN (fn_arg))
2426 pcum->named_count += 1;
2428 if (! pcum->named_count)
2429 pcum->named_count = INT_MAX;
2434 /* Return true if mode/type need doubleword alignment. */
2435 bool
2436 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2438 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2439 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
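/* (E.g., on an ABI where DImode carries 64-bit alignment, DImode
   exceeds the 32-bit PARM_BOUNDARY and so needs doubleword alignment,
   while SImode does not.)  */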
2443 /* Determine where to put an argument to a function.
2444 Value is zero to push the argument on the stack,
2445 or a hard register in which to store the argument.
2447 MODE is the argument's machine mode.
2448 TYPE is the data type of the argument (as a tree).
2449 This is null for libcalls where that information may
2450 not be available.
2451 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2452 the preceding args and about the function being called.
2453 NAMED is nonzero if this argument is a named parameter
2454 (otherwise it is an extra parameter matching an ellipsis). */
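/* (Illustratively, assuming ARM_DOUBLEWORD_ALIGN is in effect: for
   f (int a, long long b), a is placed in r0, b is bumped to the even
   register pair r2/r3, and any further argument words spill to the
   stack.)  */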
2457 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2458 tree type, int named)
2460 int nregs;
2462 /* Varargs vectors are treated the same as long long.
2463      named_count avoids having to change the way arm handles 'named'.  */
2464 if (TARGET_IWMMXT_ABI
2465 && VECTOR_MODE_SUPPORTED_P (mode)
2466 && pcum->named_count > pcum->nargs + 1)
2468 if (pcum->iwmmxt_nregs <= 9)
2469 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2470 else
2472 pcum->can_split = false;
2473 return NULL_RTX;
2477 /* Put doubleword aligned quantities in even register pairs. */
2478 if (pcum->nregs & 1
2479 && ARM_DOUBLEWORD_ALIGN
2480 && arm_needs_doubleword_align (mode, type))
2481 pcum->nregs++;
2483 if (mode == VOIDmode)
2484 /* Compute operand 2 of the call insn. */
2485 return GEN_INT (pcum->call_cookie);
2487 /* Only allow splitting an arg between regs and memory if all preceding
2488 args were allocated to regs. For args passed by reference we only count
2489 the reference pointer. */
2490 if (pcum->can_split)
2491 nregs = 1;
2492 else
2493 nregs = ARM_NUM_REGS2 (mode, type);
2495 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2496 return NULL_RTX;
2498 return gen_rtx_REG (mode, pcum->nregs);
2501 /* Variable sized types are passed by reference. This is a GCC
2502 extension to the ARM ABI. */
2504 static bool
2505 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2506 enum machine_mode mode ATTRIBUTE_UNUSED,
2507 tree type, bool named ATTRIBUTE_UNUSED)
2509 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2512 /* Encode the current state of the #pragma [no_]long_calls. */
2513 typedef enum
2515   OFF,		/* No #pragma [no_]long_calls is in effect.  */
2516 LONG, /* #pragma long_calls is in effect. */
2517 SHORT /* #pragma no_long_calls is in effect. */
2518 } arm_pragma_enum;
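/* Illustrative use of the pragmas handled below:

       #pragma long_calls
       extern void far_away (void);	-- calls compiled as long calls
       #pragma long_calls_off

   with #pragma no_long_calls forcing short calls instead.  */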
2520 static arm_pragma_enum arm_pragma_long_calls = OFF;
2522 void
2523 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2525 arm_pragma_long_calls = LONG;
2528 void
2529 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2531 arm_pragma_long_calls = SHORT;
2534 void
2535 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2537 arm_pragma_long_calls = OFF;
2540 /* Table of machine attributes. */
2541 const struct attribute_spec arm_attribute_table[] =
2543 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2544 /* Function calls made to this symbol must be done indirectly, because
2545      it may lie outside the 26-bit addressing range of a normal function
2546 call. */
2547 { "long_call", 0, 0, false, true, true, NULL },
2548   /* Whereas these functions are always known to reside within the 26-bit
2549 addressing range. */
2550 { "short_call", 0, 0, false, true, true, NULL },
2551 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2552 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2553 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2554 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2555 #ifdef ARM_PE
2556 /* ARM/PE has three new attributes:
2557 interfacearm - ?
2558 dllexport - for exporting a function/variable that will live in a dll
2559 dllimport - for importing a function/variable from a dll
2561 Microsoft allows multiple declspecs in one __declspec, separating
2562 them with spaces. We do NOT support this. Instead, use __declspec
2563 multiple times.
2565 { "dllimport", 0, 0, true, false, false, NULL },
2566 { "dllexport", 0, 0, true, false, false, NULL },
2567 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2568 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2569 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2570 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2571 #endif
2572 { NULL, 0, 0, false, false, false, NULL }
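/* Illustrative uses of the attributes above:

       extern void helper (void) __attribute__ ((long_call));
       void handler (void) __attribute__ ((interrupt ("IRQ")));  */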
2575 /* Handle an attribute requiring a FUNCTION_DECL;
2576 arguments as in struct attribute_spec.handler. */
2577 static tree
2578 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2579 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2581 if (TREE_CODE (*node) != FUNCTION_DECL)
2583 warning ("`%s' attribute only applies to functions",
2584 IDENTIFIER_POINTER (name));
2585 *no_add_attrs = true;
2588 return NULL_TREE;
2591 /* Handle an "interrupt" or "isr" attribute;
2592 arguments as in struct attribute_spec.handler. */
2593 static tree
2594 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2595 bool *no_add_attrs)
2597 if (DECL_P (*node))
2599 if (TREE_CODE (*node) != FUNCTION_DECL)
2601 warning ("`%s' attribute only applies to functions",
2602 IDENTIFIER_POINTER (name));
2603 *no_add_attrs = true;
2605       /* FIXME: the argument, if any, is checked for type attributes;
2606 should it be checked for decl ones? */
2608 else
2610 if (TREE_CODE (*node) == FUNCTION_TYPE
2611 || TREE_CODE (*node) == METHOD_TYPE)
2613 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2615 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2616 *no_add_attrs = true;
2619 else if (TREE_CODE (*node) == POINTER_TYPE
2620 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2621 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2622 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2624 *node = build_type_copy (*node);
2625 TREE_TYPE (*node) = build_type_attribute_variant
2626 (TREE_TYPE (*node),
2627 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2628 *no_add_attrs = true;
2630 else
2632 /* Possibly pass this attribute on from the type to a decl. */
2633 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2634 | (int) ATTR_FLAG_FUNCTION_NEXT
2635 | (int) ATTR_FLAG_ARRAY_NEXT))
2637 *no_add_attrs = true;
2638 return tree_cons (name, args, NULL_TREE);
2640 else
2642 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2647 return NULL_TREE;
2650 /* Return 0 if the attributes for two types are incompatible, 1 if they
2651 are compatible, and 2 if they are nearly compatible (which causes a
2652 warning to be generated). */
2653 static int
2654 arm_comp_type_attributes (tree type1, tree type2)
2656 int l1, l2, s1, s2;
2658 /* Check for mismatch of non-default calling convention. */
2659 if (TREE_CODE (type1) != FUNCTION_TYPE)
2660 return 1;
2662 /* Check for mismatched call attributes. */
2663 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2664 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2665 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2666 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2668 /* Only bother to check if an attribute is defined. */
2669 if (l1 | l2 | s1 | s2)
2671 /* If one type has an attribute, the other must have the same attribute. */
2672 if ((l1 != l2) || (s1 != s2))
2673 return 0;
2675 /* Disallow mixed attributes. */
2676 if ((l1 & s2) || (l2 & s1))
2677 return 0;
2680 /* Check for mismatched ISR attribute. */
2681 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2682 if (! l1)
2683 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2684 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2685 if (! l2)
2686     l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2687 if (l1 != l2)
2688 return 0;
2690 return 1;
2693 /* Encode long_call or short_call attribute by prefixing
2694 symbol name in DECL with a special character FLAG. */
2695 void
2696 arm_encode_call_attribute (tree decl, int flag)
2698 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2699 int len = strlen (str);
2700 char * newstr;
2702 /* Do not allow weak functions to be treated as short call. */
2703 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2704 return;
2706 newstr = alloca (len + 2);
2707 newstr[0] = flag;
2708 strcpy (newstr + 1, str);
2710 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2711 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2714 /* Assigns default attributes to newly defined type. This is used to
2715 set short_call/long_call attributes for function types of
2716 functions defined inside corresponding #pragma scopes. */
2717 static void
2718 arm_set_default_type_attributes (tree type)
2720 /* Add __attribute__ ((long_call)) to all functions, when
2721 inside #pragma long_calls or __attribute__ ((short_call)),
2722 when inside #pragma no_long_calls. */
2723 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2725 tree type_attr_list, attr_name;
2726 type_attr_list = TYPE_ATTRIBUTES (type);
2728 if (arm_pragma_long_calls == LONG)
2729 attr_name = get_identifier ("long_call");
2730 else if (arm_pragma_long_calls == SHORT)
2731 attr_name = get_identifier ("short_call");
2732 else
2733 return;
2735 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2736 TYPE_ATTRIBUTES (type) = type_attr_list;
2740 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2741 defined within the current compilation unit. If this cannot be
2742 determined, then 0 is returned. */
2743 static int
2744 current_file_function_operand (rtx sym_ref)
2746 /* This is a bit of a fib. A function will have a short call flag
2747 applied to its name if it has the short call attribute, or it has
2748 already been defined within the current compilation unit. */
2749 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2750 return 1;
2752 /* The current function is always defined within the current compilation
2753      unit.  If it is a weak definition, however, then this may not be the real
2754 definition of the function, and so we have to say no. */
2755 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2756 && !DECL_WEAK (current_function_decl))
2757 return 1;
2759 /* We cannot make the determination - default to returning 0. */
2760 return 0;
2763 /* Return nonzero if a 32 bit "long_call" should be generated for
2764 this call. We generate a long_call if the function:
2766         a.  has an __attribute__ ((long_call))
2767 or b. is within the scope of a #pragma long_calls
2768 or c. the -mlong-calls command line switch has been specified
2769             and either:
2770 1. -ffunction-sections is in effect
2771 or 2. the current function has __attribute__ ((section))
2772 or 3. the target function has __attribute__ ((section))
2774 However we do not generate a long call if the function:
2776 d. has an __attribute__ ((short_call))
2777 or e. is inside the scope of a #pragma no_long_calls
2778 or f. is defined within the current compilation unit.
2780 This function will be called by C fragments contained in the machine
2781 description file. SYM_REF and CALL_COOKIE correspond to the matched
2782 rtl operands. CALL_SYMBOL is used to distinguish between
2783 two different callers of the function. It is set to 1 in the
2784 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2785 and "call_value" patterns. This is because of the difference in the
2786 SYM_REFs passed by these patterns. */
2788 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2790 if (!call_symbol)
2792 if (GET_CODE (sym_ref) != MEM)
2793 return 0;
2795 sym_ref = XEXP (sym_ref, 0);
2798 if (GET_CODE (sym_ref) != SYMBOL_REF)
2799 return 0;
2801 if (call_cookie & CALL_SHORT)
2802 return 0;
2804 if (TARGET_LONG_CALLS)
2806 if (flag_function_sections
2807 || DECL_SECTION_NAME (current_function_decl))
2808       /* c.3 is handled by the definition of the
2809 ARM_DECLARE_FUNCTION_SIZE macro. */
2810 return 1;
2813 if (current_file_function_operand (sym_ref))
2814 return 0;
2816 return (call_cookie & CALL_LONG)
2817 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2818 || TARGET_LONG_CALLS;
2821 /* Return nonzero if it is ok to make a tail-call to DECL. */
2822 static bool
2823 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2825 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2827 if (cfun->machine->sibcall_blocked)
2828 return false;
2830 /* Never tailcall something for which we have no decl, or if we
2831 are in Thumb mode. */
2832 if (decl == NULL || TARGET_THUMB)
2833 return false;
2835 /* Get the calling method. */
2836 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2837 call_type = CALL_SHORT;
2838 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2839 call_type = CALL_LONG;
2841 /* Cannot tail-call to long calls, since these are out of range of
2842 a branch instruction. However, if not compiling PIC, we know
2843 we can reach the symbol if it is in this compilation unit. */
2844 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2845 return false;
2847 /* If we are interworking and the function is not declared static
2848 then we can't tail-call it unless we know that it exists in this
2849 compilation unit (since it might be a Thumb routine). */
2850 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2851 return false;
2853 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2854 if (IS_INTERRUPT (arm_current_func_type ()))
2855 return false;
2857 /* Everything else is ok. */
2858 return true;
2862 /* Addressing mode support functions. */
2864 /* Return nonzero if X is a legitimate immediate operand when compiling
2865 for PIC. */
2867 legitimate_pic_operand_p (rtx x)
2869 if (CONSTANT_P (x)
2870 && flag_pic
2871 && (GET_CODE (x) == SYMBOL_REF
2872 || (GET_CODE (x) == CONST
2873 && GET_CODE (XEXP (x, 0)) == PLUS
2874 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2875 return 0;
2877 return 1;
2881 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2883 if (GET_CODE (orig) == SYMBOL_REF
2884 || GET_CODE (orig) == LABEL_REF)
2886 #ifndef AOF_ASSEMBLER
2887 rtx pic_ref, address;
2888 #endif
2889 rtx insn;
2890 int subregs = 0;
2892 if (reg == 0)
2894 if (no_new_pseudos)
2895 abort ();
2896 else
2897 reg = gen_reg_rtx (Pmode);
2899 subregs = 1;
2902 #ifdef AOF_ASSEMBLER
2903 /* The AOF assembler can generate relocations for these directly, and
2904 understands that the PIC register has to be added into the offset. */
2905 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2906 #else
2907 if (subregs)
2908 address = gen_reg_rtx (Pmode);
2909 else
2910 address = reg;
2912 if (TARGET_ARM)
2913 emit_insn (gen_pic_load_addr_arm (address, orig));
2914 else
2915 emit_insn (gen_pic_load_addr_thumb (address, orig));
2917 if ((GET_CODE (orig) == LABEL_REF
2918 || (GET_CODE (orig) == SYMBOL_REF &&
2919 SYMBOL_REF_LOCAL_P (orig)))
2920 && NEED_GOT_RELOC)
2921 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2922 else
2924 pic_ref = gen_rtx_MEM (Pmode,
2925 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2926 address));
2927 MEM_READONLY_P (pic_ref) = 1;
2930 insn = emit_move_insn (reg, pic_ref);
2931 #endif
2932 current_function_uses_pic_offset_table = 1;
2933 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2934	 by the loop optimizer.  */
2935 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2936 REG_NOTES (insn));
2937 return reg;
2939 else if (GET_CODE (orig) == CONST)
2941 rtx base, offset;
2943 if (GET_CODE (XEXP (orig, 0)) == PLUS
2944 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2945 return orig;
2947 if (reg == 0)
2949 if (no_new_pseudos)
2950 abort ();
2951 else
2952 reg = gen_reg_rtx (Pmode);
2955 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2957 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2958 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2959 base == reg ? 0 : reg);
2961 else
2962 abort ();
2964 if (GET_CODE (offset) == CONST_INT)
2966	  /* The base register doesn't really matter; we only want to
2967 test the index for the appropriate mode. */
2968 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2970 if (!no_new_pseudos)
2971 offset = force_reg (Pmode, offset);
2972 else
2973 abort ();
2976 if (GET_CODE (offset) == CONST_INT)
2977 return plus_constant (base, INTVAL (offset));
2980 if (GET_MODE_SIZE (mode) > 4
2981 && (GET_MODE_CLASS (mode) == MODE_INT
2982 || TARGET_SOFT_FLOAT))
2984 emit_insn (gen_addsi3 (reg, base, offset));
2985 return reg;
2988 return gen_rtx_PLUS (Pmode, base, offset);
2991 return orig;
2995 /* Find a spare low register. */
2997 static int
2998 thumb_find_work_register (int live_regs_mask)
3000 int reg;
3002 /* Use a spare arg register. */
3003 if (!regs_ever_live[LAST_ARG_REGNUM])
3004 return LAST_ARG_REGNUM;
3006 /* Look for a pushed register. */
3007 for (reg = 0; reg < LAST_LO_REGNUM; reg++)
3008 if (live_regs_mask & (1 << reg))
3009 return reg;
3011 /* Something went wrong. */
3012 abort ();
3016 /* Generate code to load the PIC register. */
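/* A sketch of the ARM-mode sequence this emits (assuming a typical
   ELF/GOT configuration; register and label names illustrative):

	ldr	sl, .Lpic	@ .Lpic: .word _GLOBAL_OFFSET_TABLE_ - (.L1 + 8)
   .L1: add	sl, pc, sl	@ sl now holds the GOT base

   via the pic_load_addr_* and pic_add_dot_plus_* patterns below.  */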
3018 void
3019 arm_load_pic_register (void)
3021 #ifndef AOF_ASSEMBLER
3022 rtx l1, pic_tmp, pic_tmp2, pic_rtx;
3023 rtx global_offset_table;
3025 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3026 return;
3028 if (!flag_pic)
3029 abort ();
3031 l1 = gen_label_rtx ();
3033 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3034 /* On the ARM the PC register contains 'dot + 8' at the time of the
3035      addition; on the Thumb it is 'dot + 4'.  */
3036 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
3037 if (GOT_PCREL)
3038 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3039 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3040 else
3041 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3043 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3045 if (TARGET_ARM)
3047 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
3048 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
3050 else
3052 if (REGNO (pic_offset_table_rtx) > LAST_LO_REGNUM)
3054 int reg;
3056	  /* We will have pushed the PIC register, so we should always be
3057 able to find a work register. */
3058 reg = thumb_find_work_register (thumb_compute_save_reg_mask ());
3059 pic_tmp = gen_rtx_REG (SImode, reg);
3060 emit_insn (gen_pic_load_addr_thumb (pic_tmp, pic_rtx));
3061 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3063 else
3064 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
3065 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
3068 /* Need to emit this whether or not we obey regdecls,
3069 since setjmp/longjmp can cause life info to screw up. */
3070 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3071 #endif /* AOF_ASSEMBLER */
3075 /* Return nonzero if X is valid as an ARM state addressing register. */
3076 static int
3077 arm_address_register_rtx_p (rtx x, int strict_p)
3079 int regno;
3081 if (GET_CODE (x) != REG)
3082 return 0;
3084 regno = REGNO (x);
3086 if (strict_p)
3087 return ARM_REGNO_OK_FOR_BASE_P (regno);
3089 return (regno <= LAST_ARM_REGNUM
3090 || regno >= FIRST_PSEUDO_REGISTER
3091 || regno == FRAME_POINTER_REGNUM
3092 || regno == ARG_POINTER_REGNUM);
3095 /* Return nonzero if X is a valid ARM state address operand. */
3097 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3098 int strict_p)
3100 bool use_ldrd;
3101 enum rtx_code code = GET_CODE (x);
3103 if (arm_address_register_rtx_p (x, strict_p))
3104 return 1;
3106 use_ldrd = (TARGET_LDRD
3107 && (mode == DImode
3108 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3110 if (code == POST_INC || code == PRE_DEC
3111 || ((code == PRE_INC || code == POST_DEC)
3112 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3113 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3115 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3116 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3117 && GET_CODE (XEXP (x, 1)) == PLUS
3118 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3120 rtx addend = XEXP (XEXP (x, 1), 1);
3122       /* Don't allow ldrd post-increment by register because it's hard
3123	  to fix up invalid register choices.  */
3124 if (use_ldrd
3125 && GET_CODE (x) == POST_MODIFY
3126 && GET_CODE (addend) == REG)
3127 return 0;
3129 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3130 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3133   /* After reload, constants split into minipools will have addresses
3134 from a LABEL_REF. */
3135 else if (reload_completed
3136 && (code == LABEL_REF
3137 || (code == CONST
3138 && GET_CODE (XEXP (x, 0)) == PLUS
3139 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3140 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3141 return 1;
3143 else if (mode == TImode)
3144 return 0;
3146 else if (code == PLUS)
3148 rtx xop0 = XEXP (x, 0);
3149 rtx xop1 = XEXP (x, 1);
3151 return ((arm_address_register_rtx_p (xop0, strict_p)
3152 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3153 || (arm_address_register_rtx_p (xop1, strict_p)
3154 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3157 #if 0
3158 /* Reload currently can't handle MINUS, so disable this for now */
3159 else if (GET_CODE (x) == MINUS)
3161 rtx xop0 = XEXP (x, 0);
3162 rtx xop1 = XEXP (x, 1);
3164 return (arm_address_register_rtx_p (xop0, strict_p)
3165 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3167 #endif
3169 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3170 && code == SYMBOL_REF
3171 && CONSTANT_POOL_ADDRESS_P (x)
3172 && ! (flag_pic
3173 && symbol_mentioned_p (get_pool_constant (x))))
3174 return 1;
3176 return 0;
3179 /* Return nonzero if INDEX is valid for an address index operand in
3180 ARM state. */
3181 static int
3182 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3183 int strict_p)
3185 HOST_WIDE_INT range;
3186 enum rtx_code code = GET_CODE (index);
3188 /* Standard coprocessor addressing modes. */
3189 if (TARGET_HARD_FLOAT
3190 && (TARGET_FPA || TARGET_MAVERICK)
3191 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3192 || (TARGET_MAVERICK && mode == DImode)))
3193 return (code == CONST_INT && INTVAL (index) < 1024
3194 && INTVAL (index) > -1024
3195 && (INTVAL (index) & 3) == 0);
3197 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3198 return (code == CONST_INT
3199 && INTVAL (index) < 1024
3200 && INTVAL (index) > -1024
3201 && (INTVAL (index) & 3) == 0);
3203 if (arm_address_register_rtx_p (index, strict_p)
3204 && (GET_MODE_SIZE (mode) <= 4))
3205 return 1;
3207 if (mode == DImode || mode == DFmode)
3209 if (code == CONST_INT)
3211 HOST_WIDE_INT val = INTVAL (index);
3213 if (TARGET_LDRD)
3214 return val > -256 && val < 256;
3215 else
3216 return val > -4096 && val < 4092;
3219 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3222 if (GET_MODE_SIZE (mode) <= 4
3223 && ! (arm_arch4
3224 && (mode == HImode
3225 || (mode == QImode && outer == SIGN_EXTEND))))
3227 if (code == MULT)
3229 rtx xiop0 = XEXP (index, 0);
3230 rtx xiop1 = XEXP (index, 1);
3232 return ((arm_address_register_rtx_p (xiop0, strict_p)
3233 && power_of_two_operand (xiop1, SImode))
3234 || (arm_address_register_rtx_p (xiop1, strict_p)
3235 && power_of_two_operand (xiop0, SImode)));
3237 else if (code == LSHIFTRT || code == ASHIFTRT
3238 || code == ASHIFT || code == ROTATERT)
3240 rtx op = XEXP (index, 1);
3242 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3243 && GET_CODE (op) == CONST_INT
3244 && INTVAL (op) > 0
3245 && INTVAL (op) <= 31);
3249 /* For ARM v4 we may be doing a sign-extend operation during the
3250 load. */
3251 if (arm_arch4)
3253 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3254 range = 256;
3255 else
3256 range = 4096;
3258 else
3259 range = (mode == HImode) ? 4095 : 4096;
3261 return (code == CONST_INT
3262 && INTVAL (index) < range
3263 && INTVAL (index) > -range);
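/* (So, illustratively: with arm_arch4, HImode and sign-extended QImode
   offsets must lie in (-256, 256) to fit ldrh/ldrsb's 8-bit offset
   field, while other sizes get ldr/str's 12-bit (-4096, 4096) range.)  */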
3266 /* Return nonzero if X is valid as a Thumb state base register. */
3267 static int
3268 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3270 int regno;
3272 if (GET_CODE (x) != REG)
3273 return 0;
3275 regno = REGNO (x);
3277 if (strict_p)
3278 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3280 return (regno <= LAST_LO_REGNUM
3281 || regno > LAST_VIRTUAL_REGISTER
3282 || regno == FRAME_POINTER_REGNUM
3283 || (GET_MODE_SIZE (mode) >= 4
3284 && (regno == STACK_POINTER_REGNUM
3285 || regno >= FIRST_PSEUDO_REGISTER
3286 || x == hard_frame_pointer_rtx
3287 || x == arg_pointer_rtx)));
3290 /* Return nonzero if x is a legitimate index register. This is the case
3291 for any base register that can access a QImode object. */
3292 inline static int
3293 thumb_index_register_rtx_p (rtx x, int strict_p)
3295 return thumb_base_register_rtx_p (x, QImode, strict_p);
3298 /* Return nonzero if x is a legitimate Thumb-state address.
3300 The AP may be eliminated to either the SP or the FP, so we use the
3301 least common denominator, e.g. SImode, and offsets from 0 to 64.
3303 ??? Verify whether the above is the right approach.
3305 ??? Also, the FP may be eliminated to the SP, so perhaps that
3306 needs special handling also.
3308 ??? Look at how the mips16 port solves this problem. It probably uses
3309 better ways to solve some of these problems.
3311 Although it is not incorrect, we don't accept QImode and HImode
3312 addresses based on the frame pointer or arg pointer until the
3313 reload pass starts. This is so that eliminating such addresses
3314 into stack based ones won't produce impossible code. */
3316 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3318 /* ??? Not clear if this is right. Experiment. */
3319 if (GET_MODE_SIZE (mode) < 4
3320 && !(reload_in_progress || reload_completed)
3321 && (reg_mentioned_p (frame_pointer_rtx, x)
3322 || reg_mentioned_p (arg_pointer_rtx, x)
3323 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3324 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3325 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3326 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3327 return 0;
3329 /* Accept any base register. SP only in SImode or larger. */
3330 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3331 return 1;
3333 /* This is PC relative data before arm_reorg runs. */
3334 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3335 && GET_CODE (x) == SYMBOL_REF
3336 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3337 return 1;
3339 /* This is PC relative data after arm_reorg runs. */
3340 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3341 && (GET_CODE (x) == LABEL_REF
3342 || (GET_CODE (x) == CONST
3343 && GET_CODE (XEXP (x, 0)) == PLUS
3344 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3345 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3346 return 1;
3348 /* Post-inc indexing only supported for SImode and larger. */
3349 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3350 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3351 return 1;
3353 else if (GET_CODE (x) == PLUS)
3355 /* REG+REG address can be any two index registers. */
3356 /* We disallow FRAME+REG addressing since we know that FRAME
3357 will be replaced with STACK, and SP relative addressing only
3358 permits SP+OFFSET. */
3359 if (GET_MODE_SIZE (mode) <= 4
3360 && XEXP (x, 0) != frame_pointer_rtx
3361 && XEXP (x, 1) != frame_pointer_rtx
3362 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3363 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3364 return 1;
3366 /* REG+const has 5-7 bit offset for non-SP registers. */
3367 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3368 || XEXP (x, 0) == arg_pointer_rtx)
3369 && GET_CODE (XEXP (x, 1)) == CONST_INT
3370 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3371 return 1;
3373 /* REG+const has 10 bit offset for SP, but only SImode and
3374 larger is supported. */
3375 /* ??? Should probably check for DI/DFmode overflow here
3376 just like GO_IF_LEGITIMATE_OFFSET does. */
3377 else if (GET_CODE (XEXP (x, 0)) == REG
3378 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3379 && GET_MODE_SIZE (mode) >= 4
3380 && GET_CODE (XEXP (x, 1)) == CONST_INT
3381 && INTVAL (XEXP (x, 1)) >= 0
3382 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3383 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3384 return 1;
3386 else if (GET_CODE (XEXP (x, 0)) == REG
3387 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3388 && GET_MODE_SIZE (mode) >= 4
3389 && GET_CODE (XEXP (x, 1)) == CONST_INT
3390 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3391 return 1;
3394 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3395 && GET_MODE_SIZE (mode) == 4
3396 && GET_CODE (x) == SYMBOL_REF
3397 && CONSTANT_POOL_ADDRESS_P (x)
3398 && !(flag_pic
3399 && symbol_mentioned_p (get_pool_constant (x))))
3400 return 1;
3402 return 0;
3405 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3406 instruction of mode MODE. */
3408 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3410 switch (GET_MODE_SIZE (mode))
3412 case 1:
3413 return val >= 0 && val < 32;
3415 case 2:
3416 return val >= 0 && val < 64 && (val & 1) == 0;
3418 default:
3419 return (val >= 0
3420 && (val + GET_MODE_SIZE (mode)) <= 128
3421 && (val & 3) == 0);
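/* (Illustratively: byte accesses take offsets #0..#31, halfword
   accesses #0..#62 (even), and word accesses #0..#124 (a multiple
   of 4), matching the three cases above.)  */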
3425 /* Try machine-dependent ways of modifying an illegitimate address
3426 to be legitimate. If we find one, return the new, valid address. */
3428 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3430 if (GET_CODE (x) == PLUS)
3432 rtx xop0 = XEXP (x, 0);
3433 rtx xop1 = XEXP (x, 1);
3435 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3436 xop0 = force_reg (SImode, xop0);
3438 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3439 xop1 = force_reg (SImode, xop1);
3441 if (ARM_BASE_REGISTER_RTX_P (xop0)
3442 && GET_CODE (xop1) == CONST_INT)
3444 HOST_WIDE_INT n, low_n;
3445 rtx base_reg, val;
3446 n = INTVAL (xop1);
3448 /* VFP addressing modes actually allow greater offsets, but for
3449 now we just stick with the lowest common denominator. */
3450 if (mode == DImode
3451 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3453 low_n = n & 0x0f;
3454 n &= ~0x0f;
3455 if (low_n > 4)
3457 n += 16;
3458 low_n -= 16;
3461 else
3463 low_n = ((mode) == TImode ? 0
3464 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3465 n -= low_n;
3468 base_reg = gen_reg_rtx (SImode);
3469 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3470 GEN_INT (n)), NULL_RTX);
3471 emit_move_insn (base_reg, val);
3472 x = (low_n == 0 ? base_reg
3473 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3475 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3476 x = gen_rtx_PLUS (SImode, xop0, xop1);
3479 /* XXX We don't allow MINUS any more -- see comment in
3480 arm_legitimate_address_p (). */
3481 else if (GET_CODE (x) == MINUS)
3483 rtx xop0 = XEXP (x, 0);
3484 rtx xop1 = XEXP (x, 1);
3486 if (CONSTANT_P (xop0))
3487 xop0 = force_reg (SImode, xop0);
3489 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3490 xop1 = force_reg (SImode, xop1);
3492 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3493 x = gen_rtx_MINUS (SImode, xop0, xop1);
3496 if (flag_pic)
3498 /* We need to find and carefully transform any SYMBOL and LABEL
3499	 references, so we go back to the original address expression.  */
3500 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3502 if (new_x != orig_x)
3503 x = new_x;
3506 return x;
3510 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3511 to be legitimate. If we find one, return the new, valid address. */
3513 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3515 if (GET_CODE (x) == PLUS
3516 && GET_CODE (XEXP (x, 1)) == CONST_INT
3517 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3518 || INTVAL (XEXP (x, 1)) < 0))
3520 rtx xop0 = XEXP (x, 0);
3521 rtx xop1 = XEXP (x, 1);
3522 HOST_WIDE_INT offset = INTVAL (xop1);
3524 /* Try and fold the offset into a biasing of the base register and
3525 then offsetting that. Don't do this when optimizing for space
3526 since it can cause too many CSEs. */
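      /* (E.g., illustratively: an SImode access at (reg + 260) becomes
	 tmp = reg + 252 followed by a load at (tmp + 8), keeping the
	 residual offset inside the 0..124 range a Thumb ldr allows.)  */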
3527 if (optimize_size && offset >= 0
3528 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3530 HOST_WIDE_INT delta;
3532 if (offset >= 256)
3533 delta = offset - (256 - GET_MODE_SIZE (mode));
3534 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3535 delta = 31 * GET_MODE_SIZE (mode);
3536 else
3537 delta = offset & (~31 * GET_MODE_SIZE (mode));
3539 xop0 = force_operand (plus_constant (xop0, offset - delta),
3540 NULL_RTX);
3541 x = plus_constant (xop0, delta);
3543 else if (offset < 0 && offset > -256)
3544 /* Small negative offsets are best done with a subtract before the
3545	   dereference, since forcing these into a register normally takes two
3546 instructions. */
3547 x = force_operand (x, NULL_RTX);
3548 else
3550 /* For the remaining cases, force the constant into a register. */
3551 xop1 = force_reg (SImode, xop1);
3552 x = gen_rtx_PLUS (SImode, xop0, xop1);
3555 else if (GET_CODE (x) == PLUS
3556 && s_register_operand (XEXP (x, 1), SImode)
3557 && !s_register_operand (XEXP (x, 0), SImode))
3559 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3561 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3564 if (flag_pic)
3566 /* We need to find and carefully transform any SYMBOL and LABEL
3567	 references, so we go back to the original address expression.  */
3568 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3570 if (new_x != orig_x)
3571 x = new_x;
3574 return x;
3579 #define REG_OR_SUBREG_REG(X) \
3580 (GET_CODE (X) == REG \
3581 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3583 #define REG_OR_SUBREG_RTX(X) \
3584 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3586 #ifndef COSTS_N_INSNS
3587 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3588 #endif
3589 static inline int
3590 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3592 enum machine_mode mode = GET_MODE (x);
3594 switch (code)
3596 case ASHIFT:
3597 case ASHIFTRT:
3598 case LSHIFTRT:
3599 case ROTATERT:
3600 case PLUS:
3601 case MINUS:
3602 case COMPARE:
3603 case NEG:
3604 case NOT:
3605 return COSTS_N_INSNS (1);
3607 case MULT:
3608 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3610 int cycles = 0;
3611 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3613 while (i)
3615 i >>= 2;
3616 cycles++;
3618 return COSTS_N_INSNS (2) + cycles;
3620 return COSTS_N_INSNS (1) + 16;
3622 case SET:
3623 return (COSTS_N_INSNS (1)
3624 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3625 		     + (GET_CODE (SET_DEST (x)) == MEM)));
3627 case CONST_INT:
3628 if (outer == SET)
3630 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3631 return 0;
3632 if (thumb_shiftable_const (INTVAL (x)))
3633 return COSTS_N_INSNS (2);
3634 return COSTS_N_INSNS (3);
3636 else if ((outer == PLUS || outer == COMPARE)
3637 && INTVAL (x) < 256 && INTVAL (x) > -256)
3638 return 0;
3639 else if (outer == AND
3640 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3641 return COSTS_N_INSNS (1);
3642 else if (outer == ASHIFT || outer == ASHIFTRT
3643 || outer == LSHIFTRT)
3644 return 0;
3645 return COSTS_N_INSNS (2);
3647 case CONST:
3648 case CONST_DOUBLE:
3649 case LABEL_REF:
3650 case SYMBOL_REF:
3651 return COSTS_N_INSNS (3);
3653 case UDIV:
3654 case UMOD:
3655 case DIV:
3656 case MOD:
3657 return 100;
3659 case TRUNCATE:
3660 return 99;
3662 case AND:
3663 case XOR:
3664 case IOR:
3665 /* XXX guess. */
3666 return 8;
3668 case MEM:
3669 /* XXX another guess. */
3670 /* Memory costs quite a lot for the first word, but subsequent words
3671 load at the equivalent of a single insn each. */
3672 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3673 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3674 ? 4 : 0));
3676 case IF_THEN_ELSE:
3677 /* XXX a guess. */
3678 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3679 return 14;
3680 return 2;
3682 case ZERO_EXTEND:
3683 /* XXX still guessing. */
3684 switch (GET_MODE (XEXP (x, 0)))
3686 case QImode:
3687 return (1 + (mode == DImode ? 4 : 0)
3688 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3690 case HImode:
3691 return (4 + (mode == DImode ? 4 : 0)
3692 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3694 case SImode:
3695 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3697 default:
3698 return 99;
3701 default:
3702 return 99;
3707 /* Worker routine for arm_rtx_costs. */
3708 static inline int
3709 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3711 enum machine_mode mode = GET_MODE (x);
3712 enum rtx_code subcode;
3713 int extra_cost;
3715 switch (code)
3717 case MEM:
3718 /* Memory costs quite a lot for the first word, but subsequent words
3719 load at the equivalent of a single insn each. */
3720 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3721 + (GET_CODE (x) == SYMBOL_REF
3722 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
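      /* (E.g. a two-word DImode load costs 10 + 4 * 1 = 14 under this
	 metric, plus 4 more if it is a constant-pool reference.)  */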
3724 case DIV:
3725 case MOD:
3726 case UDIV:
3727 case UMOD:
3728 return optimize_size ? COSTS_N_INSNS (2) : 100;
3730 case ROTATE:
3731 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3732 return 4;
3733 /* Fall through */
3734 case ROTATERT:
3735 if (mode != SImode)
3736 return 8;
3737 /* Fall through */
3738 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3739 if (mode == DImode)
3740 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3741 + ((GET_CODE (XEXP (x, 0)) == REG
3742 || (GET_CODE (XEXP (x, 0)) == SUBREG
3743 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3744 ? 0 : 8));
3745 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3746 || (GET_CODE (XEXP (x, 0)) == SUBREG
3747 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3748 ? 0 : 4)
3749 + ((GET_CODE (XEXP (x, 1)) == REG
3750 || (GET_CODE (XEXP (x, 1)) == SUBREG
3751 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3752 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3753 ? 0 : 4));
3755 case MINUS:
3756 if (mode == DImode)
3757 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3758 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3759 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3760 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3761 ? 0 : 8));
3763 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3764 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3765 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3766 && arm_const_double_rtx (XEXP (x, 1))))
3767 ? 0 : 8)
3768 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3769 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3770 && arm_const_double_rtx (XEXP (x, 0))))
3771 ? 0 : 8));
3773 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3774 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3775 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3776 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3777 || subcode == ASHIFTRT || subcode == LSHIFTRT
3778 || subcode == ROTATE || subcode == ROTATERT
3779 || (subcode == MULT
3780 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3781 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3782 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3783 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3784 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3785 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3786 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3787 return 1;
3788 /* Fall through */
3790 case PLUS:
3791 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3792 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3793 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3794 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3795 && arm_const_double_rtx (XEXP (x, 1))))
3796 ? 0 : 8));
3798 /* Fall through */
3799 case AND: case XOR: case IOR:
3800 extra_cost = 0;
3802 /* Normally the frame registers will be spilt into reg+const during
3803 reload, so it is a bad idea to combine them with other instructions,
3804 since then they might not be moved outside of loops. As a compromise
3805 we allow integration with ops that have a constant as their second
3806 operand. */
3807 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3808 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3809 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3810 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3811 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3812 extra_cost = 4;
3814 if (mode == DImode)
3815 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3816 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3817 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3818 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3819 ? 0 : 8));
3821 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3822 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3823 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3824 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3825 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3826 ? 0 : 4));
3828 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3829 return (1 + extra_cost
3830 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3831 || subcode == LSHIFTRT || subcode == ASHIFTRT
3832 || subcode == ROTATE || subcode == ROTATERT
3833 || (subcode == MULT
3834 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3835 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3836 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3837 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3838 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3839 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3840 ? 0 : 4));
3842 return 8;
3844 case MULT:
3845 /* This should have been handled by the CPU specific routines. */
3846 abort ();
3848 case TRUNCATE:
3849 if (arm_arch3m && mode == SImode
3850 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3851 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3852 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3853 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3854 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3855 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3856 return 8;
3857 return 99;
3859 case NEG:
3860 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3861 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3862 /* Fall through */
3863 case NOT:
3864 if (mode == DImode)
3865 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3867 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3869 case IF_THEN_ELSE:
3870 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3871 return 14;
3872 return 2;
3874 case COMPARE:
3875 return 1;
3877 case ABS:
3878 return 4 + (mode == DImode ? 4 : 0);
3880 case SIGN_EXTEND:
3881 if (GET_MODE (XEXP (x, 0)) == QImode)
3882 return (4 + (mode == DImode ? 4 : 0)
3883 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3884 /* Fall through */
3885 case ZERO_EXTEND:
3886 switch (GET_MODE (XEXP (x, 0)))
3888 case QImode:
3889 return (1 + (mode == DImode ? 4 : 0)
3890 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3892 case HImode:
3893 return (4 + (mode == DImode ? 4 : 0)
3894 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3896 case SImode:
3897 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3899 case V8QImode:
3900 case V4HImode:
3901 case V2SImode:
3902 case V4QImode:
3903 case V2HImode:
3904 return 1;
3906 default:
3907 break;
3909 abort ();
3911 case CONST_INT:
3912 if (const_ok_for_arm (INTVAL (x)))
3913 return outer == SET ? 2 : -1;
3914 else if (outer == AND
3915 && const_ok_for_arm (~INTVAL (x)))
3916 return -1;
3917 else if ((outer == COMPARE
3918 || outer == PLUS || outer == MINUS)
3919 && const_ok_for_arm (-INTVAL (x)))
3920 return -1;
3921 else
3922 return 5;
3924 case CONST:
3925 case LABEL_REF:
3926 case SYMBOL_REF:
3927 return 6;
3929 case CONST_DOUBLE:
3930 if (arm_const_double_rtx (x))
3931 return outer == SET ? 2 : -1;
3932 else if ((outer == COMPARE || outer == PLUS)
3933 && neg_const_double_rtx_ok_for_fpa (x))
3934 return -1;
3935 return 7;
3937 default:
3938 return 99;
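
/* [Editor's note] The CONST_INT case above prices a constant by whether
   const_ok_for_arm accepts it.  As an illustration only -- a standalone
   sketch of the well-known encoding rule, not the arm.c implementation:
   an ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, so a constant is cheap when some even left-rotation of it
   fits in 8 bits.  */

#include <stdint.h>

static int
arm_immediate_encodable_p (uint32_t i)
{
  int r;

  for (r = 0; r < 32; r += 2)
    {
      /* Rotate left by R (R == 0 handled separately to avoid an
         undefined shift by 32), then test for an 8-bit result.  */
      uint32_t rot = r ? ((i << r) | (i >> (32 - r))) : i;

      if ((rot & ~(uint32_t) 0xff) == 0)
        return 1;
    }
  return 0;
}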
3942 /* RTX costs for cores with a slow MUL implementation. */
3944 static bool
3945 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3947 enum machine_mode mode = GET_MODE (x);
3949 if (TARGET_THUMB)
3951 *total = thumb_rtx_costs (x, code, outer_code);
3952 return true;
3955 switch (code)
3957 case MULT:
3958 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3959 || mode == DImode)
3961 *total = 30;
3962 return true;
3965 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3967 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3968 & (unsigned HOST_WIDE_INT) 0xffffffff);
3969 int cost, const_ok = const_ok_for_arm (i);
3970 int j, booth_unit_size;
3972 /* Tune as appropriate. */
3973 cost = const_ok ? 4 : 8;
3974 booth_unit_size = 2;
3975 for (j = 0; i && j < 32; j += booth_unit_size)
3977 i >>= booth_unit_size;
3978 cost += 2;
3981 *total = cost;
3982 return true;
3985 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3986 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3987 return true;
3989 default:
3990 *total = arm_rtx_costs_1 (x, code, outer_code);
3991 return true;
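
/* [Editor's note] Illustrative standalone sketch of the loop above (not
   part of arm.c): the multiplier retires roughly booth_unit_size bits of
   the multiplicand per step, each step is charged 2 cost units on top of
   a base cost, and the loop stops early once the remaining bits are all
   zero.  */

static int
booth_mul_cost_estimate (unsigned long multiplier, int base_cost,
                         int booth_unit_size)
{
  int cost = base_cost;
  int j;

  for (j = 0; multiplier && j < 32; j += booth_unit_size)
    {
      multiplier >>= booth_unit_size;
      cost += 2;
    }

  /* With booth_unit_size == 2 (slow MUL) a multiplier of 0xff costs
     base + 8; with 8 (the fast variant below) it costs base + 2.  */
  return cost;
}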
3996 /* RTX cost for cores with a fast multiply unit (M variants). */
3998 static bool
3999 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
4001 enum machine_mode mode = GET_MODE (x);
4003 if (TARGET_THUMB)
4005 *total = thumb_rtx_costs (x, code, outer_code);
4006 return true;
4009 switch (code)
4011 case MULT:
4012 /* There is no point basing this on the tuning, since it is always the
4013 fast variant if it exists at all. */
4014 if (mode == DImode
4015 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4016 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4017 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4019 *total = 8;
4020 return true;
4024 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4025 || mode == DImode)
4027 *total = 30;
4028 return true;
4031 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4033 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4034 & (unsigned HOST_WIDE_INT) 0xffffffff);
4035 int cost, const_ok = const_ok_for_arm (i);
4036 int j, booth_unit_size;
4038 /* Tune as appropriate. */
4039 cost = const_ok ? 4 : 8;
4040 booth_unit_size = 8;
4041 for (j = 0; i && j < 32; j += booth_unit_size)
4043 i >>= booth_unit_size;
4044 cost += 2;
4047 *total = cost;
4048 return true;
4051 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4052 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4053 return true;
4055 default:
4056 *total = arm_rtx_costs_1 (x, code, outer_code);
4057 return true;
4062 /* RTX cost for XScale CPUs. */
4064 static bool
4065 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
4067 enum machine_mode mode = GET_MODE (x);
4069 if (TARGET_THUMB)
4071 *total = thumb_rtx_costs (x, code, outer_code);
4072 return true;
4075 switch (code)
4077 case MULT:
4078 /* There is no point basing this on the tuning, since it is always the
4079 fast variant if it exists at all. */
4080 if (mode == DImode
4081 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4082 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4083 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4085 *total = 8;
4086 return true;
4090 if (GET_MODE_CLASS (mode) == MODE_FLOAT
4091 || mode == DImode)
4093 *total = 30;
4094 return true;
4097 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4099 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
4100 & (unsigned HOST_WIDE_INT) 0xffffffff);
4101 int cost, const_ok = const_ok_for_arm (i);
4102 unsigned HOST_WIDE_INT masked_const;
4104 /* The cost will be related to two insns.
4105 First a load of the constant (MOV or LDR), then a multiply. */
4106 cost = 2;
4107 if (! const_ok)
4108 cost += 1; /* LDR is probably more expensive because
4109 of longer result latency. */
4110 masked_const = i & 0xffff8000;
4111 if (masked_const != 0 && masked_const != 0xffff8000)
4113 masked_const = i & 0xf8000000;
4114 if (masked_const == 0 || masked_const == 0xf8000000)
4115 cost += 1;
4116 else
4117 cost += 2;
4119 *total = cost;
4120 return true;
4123 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4124 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4125 return true;
4127 default:
4128 *total = arm_rtx_costs_1 (x, code, outer_code);
4129 return true;
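
/* [Editor's note] A standalone sketch (not the arm.c code) of the
   constant-multiply estimate above: two insns are assumed (a MOV or LDR
   of the constant, then the multiply), LDR is charged one extra unit for
   its longer result latency, and narrow multiplicands -- those whose
   significant bits fit in roughly 16 or 5 bits -- finish earlier, so
   wider ones are charged one or two more units.  */

#include <stdint.h>

static int
xscale_const_mul_cost (uint32_t i, int const_ok)
{
  int cost = 2;
  uint32_t masked_const;

  if (!const_ok)
    cost += 1;

  masked_const = i & 0xffff8000;
  if (masked_const != 0 && masked_const != 0xffff8000)
    {
      masked_const = i & 0xf8000000;
      cost += (masked_const == 0 || masked_const == 0xf8000000) ? 1 : 2;
    }
  return cost;
}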
4134 /* RTX costs for 9e (and later) cores. */
4136 static bool
4137 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4139 enum machine_mode mode = GET_MODE (x);
4140 int nonreg_cost;
4141 int cost;
4143 if (TARGET_THUMB)
4145 switch (code)
4147 case MULT:
4148 *total = COSTS_N_INSNS (3);
4149 return true;
4151 default:
4152 *total = thumb_rtx_costs (x, code, outer_code);
4153 return true;
4157 switch (code)
4159 case MULT:
4160 /* There is no point basing this on the tuning, since it is always the
4161 fast variant if it exists at all. */
4162 if (mode == DImode
4163 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4164 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4165 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4167 *total = 3;
4168 return true;
4172 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4174 *total = 30;
4175 return true;
4177 if (mode == DImode)
4179 cost = 7;
4180 nonreg_cost = 8;
4182 else
4184 cost = 2;
4185 nonreg_cost = 4;
4189 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4190 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4191 return true;
4193 default:
4194 *total = arm_rtx_costs_1 (x, code, outer_code);
4195 return true;
4198 /* All address computations that can be done are free, but rtx cost returns
4199 the same for practically all of them. So we weight the different types
4200 of address here in order of preference (most preferred first):
4201 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4202 static inline int
4203 arm_arm_address_cost (rtx x)
4205 enum rtx_code c = GET_CODE (x);
4207 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4208 return 0;
4209 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4210 return 10;
4212 if (c == PLUS || c == MINUS)
4214 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4215 return 2;
4217 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4218 return 3;
4220 return 4;
4223 return 6;
4226 static inline int
4227 arm_thumb_address_cost (rtx x)
4229 enum rtx_code c = GET_CODE (x);
4231 if (c == REG)
4232 return 1;
4233 if (c == PLUS
4234 && GET_CODE (XEXP (x, 0)) == REG
4235 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4236 return 1;
4238 return 2;
4241 static int
4242 arm_address_cost (rtx x)
4244 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4247 static int
4248 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4250 rtx i_pat, d_pat;
4252 /* Some true dependencies can have a higher cost depending
4253 on precisely how certain input operands are used. */
4254 if (arm_tune_xscale
4255 && REG_NOTE_KIND (link) == 0
4256 && recog_memoized (insn) >= 0
4257 && recog_memoized (dep) >= 0)
4259 int shift_opnum = get_attr_shift (insn);
4260 enum attr_type attr_type = get_attr_type (dep);
4262 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4263 operand for INSN. If we have a shifted input operand and the
4264 instruction we depend on is another ALU instruction, then we may
4265 have to account for an additional stall. */
4266 if (shift_opnum != 0
4267 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4269 rtx shifted_operand;
4270 int opno;
4272 /* Get the shifted operand. */
4273 extract_insn (insn);
4274 shifted_operand = recog_data.operand[shift_opnum];
4276 /* Iterate over all the operands in DEP. If we write an operand
4277 that overlaps with SHIFTED_OPERAND, then we have to increase the
4278 cost of this dependency. */
4279 extract_insn (dep);
4280 preprocess_constraints ();
4281 for (opno = 0; opno < recog_data.n_operands; opno++)
4283 /* We can ignore strict inputs. */
4284 if (recog_data.operand_type[opno] == OP_IN)
4285 continue;
4287 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4288 shifted_operand))
4289 return 2;
4294 /* XXX This is not strictly true for the FPA. */
4295 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4296 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4297 return 0;
4299 /* Call insns don't incur a stall, even if they follow a load. */
4300 if (REG_NOTE_KIND (link) == 0
4301 && GET_CODE (insn) == CALL_INSN)
4302 return 1;
4304 if ((i_pat = single_set (insn)) != NULL
4305 && GET_CODE (SET_SRC (i_pat)) == MEM
4306 && (d_pat = single_set (dep)) != NULL
4307 && GET_CODE (SET_DEST (d_pat)) == MEM)
4309 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4310 /* This is a load after a store; there is no conflict if the load reads
4311 from a cached area. Assume that loads from the stack, and from the
4312 constant pool are cached, and that others will miss. This is a
4313 hack. */
4315 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4316 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4317 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4318 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4319 return 1;
4322 return cost;
4325 static int fp_consts_inited = 0;
4327 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4328 static const char * const strings_fp[8] =
4330 "0", "1", "2", "3",
4331 "4", "5", "0.5", "10"
4334 static REAL_VALUE_TYPE values_fp[8];
4336 static void
4337 init_fp_table (void)
4339 int i;
4340 REAL_VALUE_TYPE r;
4342 if (TARGET_VFP)
4343 fp_consts_inited = 1;
4344 else
4345 fp_consts_inited = 8;
4347 for (i = 0; i < fp_consts_inited; i++)
4349 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4350 values_fp[i] = r;
4354 /* Return TRUE if rtx X is a valid immediate FP constant. */
4356 arm_const_double_rtx (rtx x)
4358 REAL_VALUE_TYPE r;
4359 int i;
4361 if (!fp_consts_inited)
4362 init_fp_table ();
4364 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4365 if (REAL_VALUE_MINUS_ZERO (r))
4366 return 0;
4368 for (i = 0; i < fp_consts_inited; i++)
4369 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4370 return 1;
4372 return 0;
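
/* [Editor's note] Host-side analogue of the check above, for
   illustration only: compare against the same eight FPA immediates
   (just 0.0 for VFP) and reject minus zero, which compares equal to
   0.0 but has a different encoding.  */

#include <math.h>

static const double fp_immediates[8] = { 0.0, 1.0, 2.0, 3.0,
                                         4.0, 5.0, 0.5, 10.0 };

static int
fp_immediate_p (double d, int target_vfp)
{
  int n = target_vfp ? 1 : 8;
  int i;

  if (d == 0.0 && signbit (d))
    return 0;

  for (i = 0; i < n; i++)
    if (d == fp_immediates[i])
      return 1;
  return 0;
}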
4375 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4377 neg_const_double_rtx_ok_for_fpa (rtx x)
4379 REAL_VALUE_TYPE r;
4380 int i;
4382 if (!fp_consts_inited)
4383 init_fp_table ();
4385 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4386 r = REAL_VALUE_NEGATE (r);
4387 if (REAL_VALUE_MINUS_ZERO (r))
4388 return 0;
4390 for (i = 0; i < 8; i++)
4391 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4392 return 1;
4394 return 0;
4397 /* Predicates for `match_operand' and `match_operator'. */
4399 /* s_register_operand is the same as register_operand, but it doesn't accept
4400 (SUBREG (MEM)...).
4402 This function exists because at the time it was put in it led to better
4403 code. SUBREG(MEM) always needs a reload in the places where
4404 s_register_operand is used, and this seemed to lead to excessive
4405 reloading. */
4407 s_register_operand (rtx op, enum machine_mode mode)
4409 if (GET_MODE (op) != mode && mode != VOIDmode)
4410 return 0;
4412 if (GET_CODE (op) == SUBREG)
4413 op = SUBREG_REG (op);
4415 /* We don't consider registers whose class is NO_REGS
4416 to be a register operand. */
4417 /* XXX might have to check for lo regs only for thumb ??? */
4418 return (GET_CODE (op) == REG
4419 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4420 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4423 /* A hard register operand (even before reload). */
4425 arm_hard_register_operand (rtx op, enum machine_mode mode)
4427 if (GET_MODE (op) != mode && mode != VOIDmode)
4428 return 0;
4430 return (GET_CODE (op) == REG
4431 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4434 /* An arm register operand. */
4436 arm_general_register_operand (rtx op, enum machine_mode mode)
4438 if (GET_MODE (op) != mode && mode != VOIDmode)
4439 return 0;
4441 if (GET_CODE (op) == SUBREG)
4442 op = SUBREG_REG (op);
4444 return (GET_CODE (op) == REG
4445 && (REGNO (op) <= LAST_ARM_REGNUM
4446 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4449 /* Only accept reg, subreg(reg), const_int. */
4451 reg_or_int_operand (rtx op, enum machine_mode mode)
4453 if (GET_CODE (op) == CONST_INT)
4454 return 1;
4456 if (GET_MODE (op) != mode && mode != VOIDmode)
4457 return 0;
4459 if (GET_CODE (op) == SUBREG)
4460 op = SUBREG_REG (op);
4462 /* We don't consider registers whose class is NO_REGS
4463 to be a register operand. */
4464 return (GET_CODE (op) == REG
4465 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4466 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4469 /* Return 1 if OP is an item in memory, given that we are in reload. */
4471 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4473 int regno = true_regnum (op);
4475 return (!CONSTANT_P (op)
4476 && (regno == -1
4477 || (GET_CODE (op) == REG
4478 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4481 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4483 arm_rhs_operand (rtx op, enum machine_mode mode)
4485 return (s_register_operand (op, mode)
4486 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4489 /* Return TRUE for valid operands for the
4490 rhs of an ARM instruction, or a load. */
4492 arm_rhsm_operand (rtx op, enum machine_mode mode)
4494 return (s_register_operand (op, mode)
4495 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4496 || memory_operand (op, mode));
4499 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4500 constant that is valid when negated. */
4502 arm_add_operand (rtx op, enum machine_mode mode)
4504 if (TARGET_THUMB)
4505 return thumb_cmp_operand (op, mode);
4507 return (s_register_operand (op, mode)
4508 || (GET_CODE (op) == CONST_INT
4509 && (const_ok_for_arm (INTVAL (op))
4510 || const_ok_for_arm (-INTVAL (op)))));
4513 /* Return TRUE for valid ARM constants (or when valid if negated). */
4515 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4517 return (GET_CODE (op) == CONST_INT
4518 && (const_ok_for_arm (INTVAL (op))
4519 || const_ok_for_arm (-INTVAL (op))));
4523 arm_not_operand (rtx op, enum machine_mode mode)
4525 return (s_register_operand (op, mode)
4526 || (GET_CODE (op) == CONST_INT
4527 && (const_ok_for_arm (INTVAL (op))
4528 || const_ok_for_arm (~INTVAL (op)))));
4531 /* Return TRUE if the operand is a memory reference which contains an
4532 offsettable address. */
4534 offsettable_memory_operand (rtx op, enum machine_mode mode)
4536 if (mode == VOIDmode)
4537 mode = GET_MODE (op);
4539 return (mode == GET_MODE (op)
4540 && GET_CODE (op) == MEM
4541 && offsettable_address_p (reload_completed | reload_in_progress,
4542 mode, XEXP (op, 0)));
4545 /* Return TRUE if the operand is a memory reference which is, or can be
4546 made word aligned by adjusting the offset. */
4548 alignable_memory_operand (rtx op, enum machine_mode mode)
4550 rtx reg;
4552 if (mode == VOIDmode)
4553 mode = GET_MODE (op);
4555 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4556 return 0;
4558 op = XEXP (op, 0);
4560 return ((GET_CODE (reg = op) == REG
4561 || (GET_CODE (op) == SUBREG
4562 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4563 || (GET_CODE (op) == PLUS
4564 && GET_CODE (XEXP (op, 1)) == CONST_INT
4565 && (GET_CODE (reg = XEXP (op, 0)) == REG
4566 || (GET_CODE (XEXP (op, 0)) == SUBREG
4567 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4568 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4571 /* Similar to s_register_operand, but does not allow hard integer
4572 registers. */
4574 f_register_operand (rtx op, enum machine_mode mode)
4576 if (GET_MODE (op) != mode && mode != VOIDmode)
4577 return 0;
4579 if (GET_CODE (op) == SUBREG)
4580 op = SUBREG_REG (op);
4582 /* We don't consider registers whose class is NO_REGS
4583 to be a register operand. */
4584 return (GET_CODE (op) == REG
4585 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4586 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4589 /* Return TRUE for valid operands for the rhs of a floating point insn.
4590 Allows regs or certain consts on FPA, just regs for everything else. */
4592 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4594 if (s_register_operand (op, mode))
4595 return TRUE;
4597 if (GET_MODE (op) != mode && mode != VOIDmode)
4598 return FALSE;
4600 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4601 return arm_const_double_rtx (op);
4603 return FALSE;
4607 arm_float_add_operand (rtx op, enum machine_mode mode)
4609 if (s_register_operand (op, mode))
4610 return TRUE;
4612 if (GET_MODE (op) != mode && mode != VOIDmode)
4613 return FALSE;
4615 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4616 return (arm_const_double_rtx (op)
4617 || neg_const_double_rtx_ok_for_fpa (op));
4619 return FALSE;
4623 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4624 Depends on which FPU we are targeting. */
4627 arm_float_compare_operand (rtx op, enum machine_mode mode)
4629 if (TARGET_VFP)
4630 return vfp_compare_operand (op, mode);
4631 else
4632 return arm_float_rhs_operand (op, mode);
4636 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4638 cirrus_memory_offset (rtx op)
4640 /* Reject eliminable registers. */
4641 if (! (reload_in_progress || reload_completed)
4642 && ( reg_mentioned_p (frame_pointer_rtx, op)
4643 || reg_mentioned_p (arg_pointer_rtx, op)
4644 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4645 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4646 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4647 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4648 return 0;
4650 if (GET_CODE (op) == MEM)
4652 rtx ind;
4654 ind = XEXP (op, 0);
4656 /* Match: (mem (reg)). */
4657 if (GET_CODE (ind) == REG)
4658 return 1;
4660 /* Match:
4661 (mem (plus (reg)
4662 (const))). */
4663 if (GET_CODE (ind) == PLUS
4664 && GET_CODE (XEXP (ind, 0)) == REG
4665 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4666 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4667 return 1;
4670 return 0;
4674 arm_extendqisi_mem_op (rtx op, enum machine_mode mode)
4676 if (!memory_operand (op, mode))
4677 return 0;
4679 return arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, 0);
4682 /* Return nonzero if OP is a Cirrus or general register. */
4684 cirrus_register_operand (rtx op, enum machine_mode mode)
4686 if (GET_MODE (op) != mode && mode != VOIDmode)
4687 return FALSE;
4689 if (GET_CODE (op) == SUBREG)
4690 op = SUBREG_REG (op);
4692 return (GET_CODE (op) == REG
4693 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4694 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4697 /* Return nonzero if OP is a Cirrus FP register. */
4699 cirrus_fp_register (rtx op, enum machine_mode mode)
4701 if (GET_MODE (op) != mode && mode != VOIDmode)
4702 return FALSE;
4704 if (GET_CODE (op) == SUBREG)
4705 op = SUBREG_REG (op);
4707 return (GET_CODE (op) == REG
4708 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4709 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4712 /* Return nonzero if OP is a 6-bit constant (0..63). */
4714 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4716 return (GET_CODE (op) == CONST_INT
4717 && INTVAL (op) >= 0
4718 && INTVAL (op) < 64);
4722 /* Return TRUE if OP is a valid VFP memory address pattern.
4723 WB is true if writeback address modes are allowed. */
4726 arm_coproc_mem_operand (rtx op, bool wb)
4728 rtx ind;
4730 /* Reject eliminable registers. */
4731 if (! (reload_in_progress || reload_completed)
4732 && ( reg_mentioned_p (frame_pointer_rtx, op)
4733 || reg_mentioned_p (arg_pointer_rtx, op)
4734 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4735 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4736 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4737 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4738 return FALSE;
4740 /* Constants are converted into offsets from labels. */
4741 if (GET_CODE (op) != MEM)
4742 return FALSE;
4744 ind = XEXP (op, 0);
4746 if (reload_completed
4747 && (GET_CODE (ind) == LABEL_REF
4748 || (GET_CODE (ind) == CONST
4749 && GET_CODE (XEXP (ind, 0)) == PLUS
4750 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4751 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4752 return TRUE;
4754 /* Match: (mem (reg)). */
4755 if (GET_CODE (ind) == REG)
4756 return arm_address_register_rtx_p (ind, 0);
4758 /* Autoincrement addressing modes. */
4759 if (wb
4760 && (GET_CODE (ind) == PRE_INC
4761 || GET_CODE (ind) == POST_INC
4762 || GET_CODE (ind) == PRE_DEC
4763 || GET_CODE (ind) == POST_DEC))
4764 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
4766 if (wb
4767 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
4768 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
4769 && GET_CODE (XEXP (ind, 1)) == PLUS
4770 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
4771 ind = XEXP (ind, 1);
4773 /* Match:
4774 (plus (reg)
4775 (const)). */
4776 if (GET_CODE (ind) == PLUS
4777 && GET_CODE (XEXP (ind, 0)) == REG
4778 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4779 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4780 && INTVAL (XEXP (ind, 1)) > -1024
4781 && INTVAL (XEXP (ind, 1)) < 1024
4782 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
4783 return TRUE;
4785 return FALSE;
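
/* [Editor's note] The reg+const case above boils down to this test,
   shown standalone for clarity: VFP loads and stores take a word-aligned
   offset strictly inside (-1024, 1024), i.e. an 8-bit word count plus a
   sign (an editorial gloss, not text from arm.c).  */

static int
vfp_offset_ok_p (long offset)
{
  return offset > -1024 && offset < 1024 && (offset & 3) == 0;
}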
4789 /* Return TRUE if OP is a REG or constant zero. */
4791 vfp_compare_operand (rtx op, enum machine_mode mode)
4793 if (s_register_operand (op, mode))
4794 return TRUE;
4796 return (GET_CODE (op) == CONST_DOUBLE
4797 && arm_const_double_rtx (op));
4801 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4802 VFP registers. Otherwise return NO_REGS. */
4804 enum reg_class
4805 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4807 if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
4808 return NO_REGS;
4810 return GENERAL_REGS;
4814 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4815 Used by the Cirrus Maverick code, which has to work around
4816 a hardware bug triggered by such instructions. */
4817 static bool
4818 arm_memory_load_p (rtx insn)
4820 rtx body, lhs, rhs;
4822 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4823 return false;
4825 body = PATTERN (insn);
4827 if (GET_CODE (body) != SET)
4828 return false;
4830 lhs = XEXP (body, 0);
4831 rhs = XEXP (body, 1);
4833 lhs = REG_OR_SUBREG_RTX (lhs);
4835 /* If the destination is not a general purpose
4836 register we do not have to worry. */
4837 if (GET_CODE (lhs) != REG
4838 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4839 return false;
4841 /* As well as loads from memory we also have to react
4842 to loads of invalid constants which will be turned
4843 into loads from the minipool. */
4844 return (GET_CODE (rhs) == MEM
4845 || GET_CODE (rhs) == SYMBOL_REF
4846 || note_invalid_constants (insn, -1, false));
4849 /* Return TRUE if INSN is a Cirrus instruction. */
4850 static bool
4851 arm_cirrus_insn_p (rtx insn)
4853 enum attr_cirrus attr;
4855 /* get_attr aborts on USE and CLOBBER. */
4856 if (!insn
4857 || GET_CODE (insn) != INSN
4858 || GET_CODE (PATTERN (insn)) == USE
4859 || GET_CODE (PATTERN (insn)) == CLOBBER)
4860 return 0;
4862 attr = get_attr_cirrus (insn);
4864 return attr != CIRRUS_NOT;
4867 /* Cirrus reorg for invalid instruction combinations. */
4868 static void
4869 cirrus_reorg (rtx first)
4871 enum attr_cirrus attr;
4872 rtx body = PATTERN (first);
4873 rtx t;
4874 int nops;
4876 /* Any branch must be followed by 2 non Cirrus instructions. */
4877 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4879 nops = 0;
4880 t = next_nonnote_insn (first);
4882 if (arm_cirrus_insn_p (t))
4883 ++ nops;
4885 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4886 ++ nops;
4888 while (nops --)
4889 emit_insn_after (gen_nop (), first);
4891 return;
4894 /* (float (blah)) is in parallel with a clobber. */
4895 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4896 body = XVECEXP (body, 0, 0);
4898 if (GET_CODE (body) == SET)
4900 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4902 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4903 be followed by a non Cirrus insn. */
4904 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4906 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4907 emit_insn_after (gen_nop (), first);
4909 return;
4911 else if (arm_memory_load_p (first))
4913 unsigned int arm_regno;
4915 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4916 ldr/cfmv64hr combination where the Rd field is the same
4917 in both instructions must be split with a non Cirrus
4918 insn. Example:
4920 ldr r0, blah
4922 cfmvsr mvf0, r0. */
4924 /* Get Arm register number for ldr insn. */
4925 if (GET_CODE (lhs) == REG)
4926 arm_regno = REGNO (lhs);
4927 else if (GET_CODE (rhs) == REG)
4928 arm_regno = REGNO (rhs);
4929 else
4930 abort ();
4932 /* Next insn. */
4933 first = next_nonnote_insn (first);
4935 if (! arm_cirrus_insn_p (first))
4936 return;
4938 body = PATTERN (first);
4940 /* (float (blah)) is in parallel with a clobber. */
4941 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4942 body = XVECEXP (body, 0, 0);
4944 if (GET_CODE (body) == FLOAT)
4945 body = XEXP (body, 0);
4947 if (get_attr_cirrus (first) == CIRRUS_MOVE
4948 && GET_CODE (XEXP (body, 1)) == REG
4949 && arm_regno == REGNO (XEXP (body, 1)))
4950 emit_insn_after (gen_nop (), first);
4952 return;
4956 /* get_attr aborts on USE and CLOBBER. */
4957 if (!first
4958 || GET_CODE (first) != INSN
4959 || GET_CODE (PATTERN (first)) == USE
4960 || GET_CODE (PATTERN (first)) == CLOBBER)
4961 return;
4963 attr = get_attr_cirrus (first);
4965 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4966 must be followed by a non-coprocessor instruction. */
4967 if (attr == CIRRUS_COMPARE)
4969 nops = 0;
4971 t = next_nonnote_insn (first);
4973 if (arm_cirrus_insn_p (t))
4974 ++ nops;
4976 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4977 ++ nops;
4979 while (nops --)
4980 emit_insn_after (gen_nop (), first);
4982 return;
4986 /* Return nonzero if OP is a constant power of two. */
4988 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4990 if (GET_CODE (op) == CONST_INT)
4992 HOST_WIDE_INT value = INTVAL (op);
4994 return value != 0 && (value & (value - 1)) == 0;
4997 return FALSE;
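
/* [Editor's note] Why value & (value - 1) works, as a standalone
   sketch: subtracting 1 flips the lowest set bit and everything below
   it, so ANDing clears exactly that bit; the result is zero only when
   no other bit was set.  The value != 0 guard excludes zero, which
   would otherwise pass.  */

static int
is_power_of_two (long value)
{
  return value != 0 && (value & (value - 1)) == 0;
}

/* is_power_of_two (64) == 1, is_power_of_two (96) == 0,
   is_power_of_two (0) == 0.  */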
5000 /* Return TRUE for a valid operand of a DImode operation.
5001 Either: REG, SUBREG, CONST_INT, CONST_DOUBLE or MEM(DImode_address).
5002 Note that this disallows MEM(REG+REG), but allows
5003 MEM(PRE/POST_INC/DEC(REG)). */
5005 di_operand (rtx op, enum machine_mode mode)
5007 if (s_register_operand (op, mode))
5008 return TRUE;
5010 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
5011 return FALSE;
5013 if (GET_CODE (op) == SUBREG)
5014 op = SUBREG_REG (op);
5016 switch (GET_CODE (op))
5018 case CONST_DOUBLE:
5019 case CONST_INT:
5020 return TRUE;
5022 case MEM:
5023 return memory_address_p (DImode, XEXP (op, 0));
5025 default:
5026 return FALSE;
5030 /* Like di_operand, but don't accept constants. */
5032 nonimmediate_di_operand (rtx op, enum machine_mode mode)
5034 if (s_register_operand (op, mode))
5035 return TRUE;
5037 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
5038 return FALSE;
5040 if (GET_CODE (op) == SUBREG)
5041 op = SUBREG_REG (op);
5043 if (GET_CODE (op) == MEM)
5044 return memory_address_p (DImode, XEXP (op, 0));
5046 return FALSE;
5049 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
5050 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
5051 Note that this disallows MEM(REG+REG), but allows
5052 MEM(PRE/POST_INC/DEC(REG)). */
5054 soft_df_operand (rtx op, enum machine_mode mode)
5056 if (s_register_operand (op, mode))
5057 return TRUE;
5059 if (mode != VOIDmode && GET_MODE (op) != mode)
5060 return FALSE;
5062 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
5063 return FALSE;
5065 if (GET_CODE (op) == SUBREG)
5066 op = SUBREG_REG (op);
5068 switch (GET_CODE (op))
5070 case CONST_DOUBLE:
5071 return TRUE;
5073 case MEM:
5074 return memory_address_p (DFmode, XEXP (op, 0));
5076 default:
5077 return FALSE;
5081 /* Like soft_df_operand, but don't accept constants. */
5083 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
5085 if (s_register_operand (op, mode))
5086 return TRUE;
5088 if (mode != VOIDmode && GET_MODE (op) != mode)
5089 return FALSE;
5091 if (GET_CODE (op) == SUBREG)
5092 op = SUBREG_REG (op);
5094 if (GET_CODE (op) == MEM)
5095 return memory_address_p (DFmode, XEXP (op, 0));
5096 return FALSE;
5099 /* Return TRUE for valid index operands. */
5101 index_operand (rtx op, enum machine_mode mode)
5103 return (s_register_operand (op, mode)
5104 || (immediate_operand (op, mode)
5105 && (GET_CODE (op) != CONST_INT
5106 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
5109 /* Return TRUE for valid shifts by a constant. This also accepts any
5110 power of two on the (somewhat overly relaxed) assumption that the
5111 shift operator in this case was a mult. */
5113 const_shift_operand (rtx op, enum machine_mode mode)
5115 return (power_of_two_operand (op, mode)
5116 || (immediate_operand (op, mode)
5117 && (GET_CODE (op) != CONST_INT
5118 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
5121 /* Return TRUE for arithmetic operators which can be combined with a multiply
5122 (shift). */
5124 shiftable_operator (rtx x, enum machine_mode mode)
5126 enum rtx_code code;
5128 if (GET_MODE (x) != mode)
5129 return FALSE;
5131 code = GET_CODE (x);
5133 return (code == PLUS || code == MINUS
5134 || code == IOR || code == XOR || code == AND);
5137 /* Return TRUE for binary logical operators. */
5139 logical_binary_operator (rtx x, enum machine_mode mode)
5141 enum rtx_code code;
5143 if (GET_MODE (x) != mode)
5144 return FALSE;
5146 code = GET_CODE (x);
5148 return (code == IOR || code == XOR || code == AND);
5151 /* Return TRUE for shift operators. */
5153 shift_operator (rtx x, enum machine_mode mode)
5155 enum rtx_code code;
5157 if (GET_MODE (x) != mode)
5158 return FALSE;
5160 code = GET_CODE (x);
5162 if (code == MULT)
5163 return power_of_two_operand (XEXP (x, 1), mode);
5165 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
5166 || code == ROTATERT);
5169 /* Return TRUE if x is EQ or NE. */
5171 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
5173 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
5176 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
5178 arm_comparison_operator (rtx x, enum machine_mode mode)
5180 return (comparison_operator (x, mode)
5181 && GET_CODE (x) != LTGT
5182 && GET_CODE (x) != UNEQ);
5185 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
5187 minmax_operator (rtx x, enum machine_mode mode)
5189 enum rtx_code code = GET_CODE (x);
5191 if (GET_MODE (x) != mode)
5192 return FALSE;
5194 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
5197 /* Return TRUE if this is the condition code register; if we aren't given
5198 a mode, accept any class CCmode register. */
5200 cc_register (rtx x, enum machine_mode mode)
5202 if (mode == VOIDmode)
5204 mode = GET_MODE (x);
5206 if (GET_MODE_CLASS (mode) != MODE_CC)
5207 return FALSE;
5210 if ( GET_MODE (x) == mode
5211 && GET_CODE (x) == REG
5212 && REGNO (x) == CC_REGNUM)
5213 return TRUE;
5215 return FALSE;
5218 /* Return TRUE if this is the condition code register; if we aren't given
5219 a mode, accept any class CCmode register which indicates a dominance
5220 expression. */
5222 dominant_cc_register (rtx x, enum machine_mode mode)
5224 if (mode == VOIDmode)
5226 mode = GET_MODE (x);
5228 if (GET_MODE_CLASS (mode) != MODE_CC)
5229 return FALSE;
5232 if (mode != CC_DNEmode && mode != CC_DEQmode
5233 && mode != CC_DLEmode && mode != CC_DLTmode
5234 && mode != CC_DGEmode && mode != CC_DGTmode
5235 && mode != CC_DLEUmode && mode != CC_DLTUmode
5236 && mode != CC_DGEUmode && mode != CC_DGTUmode)
5237 return FALSE;
5239 return cc_register (x, mode);
5242 /* Return TRUE if X references a SYMBOL_REF. */
5244 symbol_mentioned_p (rtx x)
5246 const char * fmt;
5247 int i;
5249 if (GET_CODE (x) == SYMBOL_REF)
5250 return 1;
5252 fmt = GET_RTX_FORMAT (GET_CODE (x));
5254 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5256 if (fmt[i] == 'E')
5258 int j;
5260 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5261 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5262 return 1;
5264 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5265 return 1;
5268 return 0;
5271 /* Return TRUE if X references a LABEL_REF. */
5273 label_mentioned_p (rtx x)
5275 const char * fmt;
5276 int i;
5278 if (GET_CODE (x) == LABEL_REF)
5279 return 1;
5281 fmt = GET_RTX_FORMAT (GET_CODE (x));
5282 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5284 if (fmt[i] == 'E')
5286 int j;
5288 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5289 if (label_mentioned_p (XVECEXP (x, i, j)))
5290 return 1;
5292 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5293 return 1;
5296 return 0;
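
/* [Editor's note] symbol_mentioned_p and label_mentioned_p above are
   the same recursive walk over the RTX format string, where 'e' marks
   a single subexpression and 'E' a vector of them.  A toy analogue on
   a plain tree (standalone sketch, not arm.c code):  */

struct expr_node
{
  int code;                     /* stand-in for the rtx code */
  int n_children;
  struct expr_node **children;
};

static int
code_mentioned_p (const struct expr_node *x, int code)
{
  int i;

  if (x->code == code)
    return 1;

  /* Recurse into every child, exactly as the 'e'/'E' cases do.  */
  for (i = 0; i < x->n_children; i++)
    if (code_mentioned_p (x->children[i], code))
      return 1;

  return 0;
}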
5299 enum rtx_code
5300 minmax_code (rtx x)
5302 enum rtx_code code = GET_CODE (x);
5304 if (code == SMAX)
5305 return GE;
5306 else if (code == SMIN)
5307 return LE;
5308 else if (code == UMIN)
5309 return LEU;
5310 else if (code == UMAX)
5311 return GEU;
5313 abort ();
5316 /* Return 1 if memory locations are adjacent. */
5318 adjacent_mem_locations (rtx a, rtx b)
5320 if ((GET_CODE (XEXP (a, 0)) == REG
5321 || (GET_CODE (XEXP (a, 0)) == PLUS
5322 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5323 && (GET_CODE (XEXP (b, 0)) == REG
5324 || (GET_CODE (XEXP (b, 0)) == PLUS
5325 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5327 int val0 = 0, val1 = 0;
5328 int reg0, reg1;
5330 if (GET_CODE (XEXP (a, 0)) == PLUS)
5332 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5333 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5335 else
5336 reg0 = REGNO (XEXP (a, 0));
5338 if (GET_CODE (XEXP (b, 0)) == PLUS)
5340 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5341 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5343 else
5344 reg1 = REGNO (XEXP (b, 0));
5346 /* Don't accept any offset that will require multiple
5347 instructions to handle, since this would cause the
5348 arith_adjacentmem pattern to output an overlong sequence. */
5349 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
5350 return 0;
5352 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5354 return 0;
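
/* [Editor's note] The test above, reduced to its arithmetic core
   (standalone sketch): two addresses are adjacent when they share a
   base register and their constant offsets differ by exactly one
   word, in either direction -- e.g. [r4, #8] and [r4, #12], but not
   [r4, #8] and [r5, #12], nor [r4, #8] and [r4, #20].  */

static int
word_adjacent_p (int reg0, long val0, int reg1, long val1)
{
  return reg0 == reg1 && (val1 - val0 == 4 || val0 - val1 == 4);
}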
5357 /* Return 1 if OP is a load multiple operation. It is known to be
5358 parallel and the first section will be tested. */
5360 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5362 HOST_WIDE_INT count = XVECLEN (op, 0);
5363 int dest_regno;
5364 rtx src_addr;
5365 HOST_WIDE_INT i = 1, base = 0;
5366 rtx elt;
5368 if (count <= 1
5369 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5370 return 0;
5372 /* Check to see if this might be a write-back. */
5373 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5375 i++;
5376 base = 1;
5378 /* Now check it more carefully. */
5379 if (GET_CODE (SET_DEST (elt)) != REG
5380 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5381 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5382 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5383 return 0;
5386 /* Perform a quick check so we don't blow up below. */
5387 if (count <= i
5388 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5389 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5390 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5391 return 0;
5393 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5394 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5396 for (; i < count; i++)
5398 elt = XVECEXP (op, 0, i);
5400 if (GET_CODE (elt) != SET
5401 || GET_CODE (SET_DEST (elt)) != REG
5402 || GET_MODE (SET_DEST (elt)) != SImode
5403 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5404 || GET_CODE (SET_SRC (elt)) != MEM
5405 || GET_MODE (SET_SRC (elt)) != SImode
5406 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5407 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5408 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5409 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5410 return 0;
5413 return 1;
5416 /* Return 1 if OP is a store multiple operation. It is known to be
5417 parallel and the first section will be tested. */
5419 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5421 HOST_WIDE_INT count = XVECLEN (op, 0);
5422 int src_regno;
5423 rtx dest_addr;
5424 HOST_WIDE_INT i = 1, base = 0;
5425 rtx elt;
5427 if (count <= 1
5428 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5429 return 0;
5431 /* Check to see if this might be a write-back. */
5432 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5434 i++;
5435 base = 1;
5437 /* Now check it more carefully. */
5438 if (GET_CODE (SET_DEST (elt)) != REG
5439 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5440 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5441 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5442 return 0;
5445 /* Perform a quick check so we don't blow up below. */
5446 if (count <= i
5447 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5448 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5449 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5450 return 0;
5452 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5453 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5455 for (; i < count; i++)
5457 elt = XVECEXP (op, 0, i);
5459 if (GET_CODE (elt) != SET
5460 || GET_CODE (SET_SRC (elt)) != REG
5461 || GET_MODE (SET_SRC (elt)) != SImode
5462 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5463 || GET_CODE (SET_DEST (elt)) != MEM
5464 || GET_MODE (SET_DEST (elt)) != SImode
5465 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5466 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5467 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5468 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5469 return 0;
5472 return 1;
5476 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5477 HOST_WIDE_INT *load_offset)
5479 int unsorted_regs[4];
5480 HOST_WIDE_INT unsorted_offsets[4];
5481 int order[4];
5482 int base_reg = -1;
5483 int i;
5485 /* Can only handle 2, 3, or 4 insns at present,
5486 though could be easily extended if required. */
5487 if (nops < 2 || nops > 4)
5488 abort ();
5490 /* Loop over the operands and check that the memory references are
5491 suitable (i.e. immediate offsets from the same base register). At
5492 the same time, extract the target register, and the memory
5493 offsets. */
5494 for (i = 0; i < nops; i++)
5496 rtx reg;
5497 rtx offset;
5499 /* Convert a subreg of a mem into the mem itself. */
5500 if (GET_CODE (operands[nops + i]) == SUBREG)
5501 operands[nops + i] = alter_subreg (operands + (nops + i));
5503 if (GET_CODE (operands[nops + i]) != MEM)
5504 abort ();
5506 /* Don't reorder volatile memory references; it doesn't seem worth
5507 looking for the case where the order is ok anyway. */
5508 if (MEM_VOLATILE_P (operands[nops + i]))
5509 return 0;
5511 offset = const0_rtx;
5513 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5514 || (GET_CODE (reg) == SUBREG
5515 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5516 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5517 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5518 == REG)
5519 || (GET_CODE (reg) == SUBREG
5520 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5521 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5522 == CONST_INT)))
5524 if (i == 0)
5526 base_reg = REGNO (reg);
5527 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5528 ? REGNO (operands[i])
5529 : REGNO (SUBREG_REG (operands[i])));
5530 order[0] = 0;
5532 else
5534 if (base_reg != (int) REGNO (reg))
5535 /* Not addressed from the same base register. */
5536 return 0;
5538 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5539 ? REGNO (operands[i])
5540 : REGNO (SUBREG_REG (operands[i])));
5541 if (unsorted_regs[i] < unsorted_regs[order[0]])
5542 order[0] = i;
5545 /* If it isn't an integer register, or if it overwrites the
5546 base register but isn't the last insn in the list, then
5547 we can't do this. */
5548 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5549 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5550 return 0;
5552 unsorted_offsets[i] = INTVAL (offset);
5554 else
5555 /* Not a suitable memory address. */
5556 return 0;
5559 /* All the useful information has now been extracted from the
5560 operands into unsorted_regs and unsorted_offsets; additionally,
5561 order[0] has been set to the lowest numbered register in the
5562 list. Sort the registers into order, and check that the memory
5563 offsets are ascending and adjacent. */
5565 for (i = 1; i < nops; i++)
5567 int j;
5569 order[i] = order[i - 1];
5570 for (j = 0; j < nops; j++)
5571 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5572 && (order[i] == order[i - 1]
5573 || unsorted_regs[j] < unsorted_regs[order[i]]))
5574 order[i] = j;
5576 /* Have we found a suitable register? If not, one must be used more
5577 than once. */
5578 if (order[i] == order[i - 1])
5579 return 0;
5581 /* Are the memory addresses adjacent and ascending? */
5582 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5583 return 0;
5586 if (base)
5588 *base = base_reg;
5590 for (i = 0; i < nops; i++)
5591 regs[i] = unsorted_regs[order[i]];
5593 *load_offset = unsorted_offsets[order[0]];
5596 if (unsorted_offsets[order[0]] == 0)
5597 return 1; /* ldmia */
5599 if (unsorted_offsets[order[0]] == 4)
5600 return 2; /* ldmib */
5602 if (unsorted_offsets[order[nops - 1]] == 0)
5603 return 3; /* ldmda */
5605 if (unsorted_offsets[order[nops - 1]] == -4)
5606 return 4; /* ldmdb */
5608 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5609 if the offset isn't small enough. The reason 2 ldrs are faster
5610 is because these ARMs are able to do more than one cache access
5611 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5612 whilst the ARM8 has a double bandwidth cache. This means that
5613 these cores can do both an instruction fetch and a data fetch in
5614 a single cycle, so the trick of calculating the address into a
5615 scratch register (one of the result regs) and then doing a load
5616 multiple actually becomes slower (and no smaller in code size).
5617 That is the transformation
5619 ldr rd1, [rbase + offset]
5620 ldr rd2, [rbase + offset + 4]
5622 to
5624 add rd1, rbase, offset
5625 ldmia rd1, {rd1, rd2}
5627 produces worse code -- '3 cycles + any stalls on rd2' instead of
5628 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5629 access per cycle, the first sequence could never complete in less
5630 than 6 cycles, whereas the ldm sequence would only take 5 and
5631 would make better use of sequential accesses if not hitting the
5632 cache.
5634 We cheat here and test 'arm_ld_sched' which we currently know to
5635 only be true for the ARM8, ARM9 and StrongARM. If this ever
5636 changes, then the test below needs to be reworked. */
5637 if (nops == 2 && arm_ld_sched)
5638 return 0;
5640 /* Can't do it without setting up the offset; only do this if it takes
5641 no more than one insn. */
5642 return (const_ok_for_arm (unsorted_offsets[order[0]])
5643 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
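
/* [Editor's note] Once the offsets are known to ascend in steps of 4,
   the return value above encodes the ldm addressing variant, chosen
   from the first and last offsets alone.  A standalone sketch of that
   final classification (names are editorial, not from arm.c):  */

enum multi_kind { MULTI_NONE, MULTI_IA, MULTI_IB, MULTI_DA, MULTI_DB,
                  MULTI_NEEDS_ADD };

static enum multi_kind
classify_multi (long first_offset, long last_offset, int offset_loadable)
{
  if (first_offset == 0)
    return MULTI_IA;            /* base points at the first word */
  if (first_offset == 4)
    return MULTI_IB;            /* base points one word below */
  if (last_offset == 0)
    return MULTI_DA;            /* base points at the last word */
  if (last_offset == -4)
    return MULTI_DB;            /* base points one word above */

  /* Otherwise an add/sub must set up the base first; only worth it
     when the offset fits a single instruction.  */
  return offset_loadable ? MULTI_NEEDS_ADD : MULTI_NONE;
}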
5646 const char *
5647 emit_ldm_seq (rtx *operands, int nops)
5649 int regs[4];
5650 int base_reg;
5651 HOST_WIDE_INT offset;
5652 char buf[100];
5653 int i;
5655 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5657 case 1:
5658 strcpy (buf, "ldm%?ia\t");
5659 break;
5661 case 2:
5662 strcpy (buf, "ldm%?ib\t");
5663 break;
5665 case 3:
5666 strcpy (buf, "ldm%?da\t");
5667 break;
5669 case 4:
5670 strcpy (buf, "ldm%?db\t");
5671 break;
5673 case 5:
5674 if (offset >= 0)
5675 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5676 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5677 (long) offset);
5678 else
5679 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5680 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5681 (long) -offset);
5682 output_asm_insn (buf, operands);
5683 base_reg = regs[0];
5684 strcpy (buf, "ldm%?ia\t");
5685 break;
5687 default:
5688 abort ();
5691 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5692 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5694 for (i = 1; i < nops; i++)
5695 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5696 reg_names[regs[i]]);
5698 strcat (buf, "}\t%@ phole ldm");
5700 output_asm_insn (buf, operands);
5701 return "";
5705 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5706 HOST_WIDE_INT * load_offset)
5708 int unsorted_regs[4];
5709 HOST_WIDE_INT unsorted_offsets[4];
5710 int order[4];
5711 int base_reg = -1;
5712 int i;
5714 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5715 extended if required. */
5716 if (nops < 2 || nops > 4)
5717 abort ();
5719 /* Loop over the operands and check that the memory references are
5720 suitable (i.e. immediate offsets from the same base register). At
5721 the same time, extract the target register, and the memory
5722 offsets. */
5723 for (i = 0; i < nops; i++)
5725 rtx reg;
5726 rtx offset;
5728 /* Convert a subreg of a mem into the mem itself. */
5729 if (GET_CODE (operands[nops + i]) == SUBREG)
5730 operands[nops + i] = alter_subreg (operands + (nops + i));
5732 if (GET_CODE (operands[nops + i]) != MEM)
5733 abort ();
5735 /* Don't reorder volatile memory references; it doesn't seem worth
5736 looking for the case where the order is ok anyway. */
5737 if (MEM_VOLATILE_P (operands[nops + i]))
5738 return 0;
5740 offset = const0_rtx;
5742 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5743 || (GET_CODE (reg) == SUBREG
5744 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5745 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5746 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5747 == REG)
5748 || (GET_CODE (reg) == SUBREG
5749 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5750 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5751 == CONST_INT)))
5753 if (i == 0)
5755 base_reg = REGNO (reg);
5756 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5757 ? REGNO (operands[i])
5758 : REGNO (SUBREG_REG (operands[i])));
5759 order[0] = 0;
5761 else
5763 if (base_reg != (int) REGNO (reg))
5764 /* Not addressed from the same base register. */
5765 return 0;
5767 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5768 ? REGNO (operands[i])
5769 : REGNO (SUBREG_REG (operands[i])));
5770 if (unsorted_regs[i] < unsorted_regs[order[0]])
5771 order[0] = i;
5774 /* If it isn't an integer register, then we can't do this. */
5775 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5776 return 0;
5778 unsorted_offsets[i] = INTVAL (offset);
5780 else
5781 /* Not a suitable memory address. */
5782 return 0;
5785 /* All the useful information has now been extracted from the
5786 operands into unsorted_regs and unsorted_offsets; additionally,
5787 order[0] has been set to the lowest numbered register in the
5788 list. Sort the registers into order, and check that the memory
5789 offsets are ascending and adjacent. */
5791 for (i = 1; i < nops; i++)
5793 int j;
5795 order[i] = order[i - 1];
5796 for (j = 0; j < nops; j++)
5797 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5798 && (order[i] == order[i - 1]
5799 || unsorted_regs[j] < unsorted_regs[order[i]]))
5800 order[i] = j;
5802 /* Have we found a suitable register? If not, one must be used more
5803 than once. */
5804 if (order[i] == order[i - 1])
5805 return 0;
5807 /* Are the memory addresses adjacent and ascending? */
5808 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5809 return 0;
5812 if (base)
5814 *base = base_reg;
5816 for (i = 0; i < nops; i++)
5817 regs[i] = unsorted_regs[order[i]];
5819 *load_offset = unsorted_offsets[order[0]];
5822 if (unsorted_offsets[order[0]] == 0)
5823 return 1; /* stmia */
5825 if (unsorted_offsets[order[0]] == 4)
5826 return 2; /* stmib */
5828 if (unsorted_offsets[order[nops - 1]] == 0)
5829 return 3; /* stmda */
5831 if (unsorted_offsets[order[nops - 1]] == -4)
5832 return 4; /* stmdb */
5834 return 0;
5837 const char *
5838 emit_stm_seq (rtx *operands, int nops)
5840 int regs[4];
5841 int base_reg;
5842 HOST_WIDE_INT offset;
5843 char buf[100];
5844 int i;
5846 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5848 case 1:
5849 strcpy (buf, "stm%?ia\t");
5850 break;
5852 case 2:
5853 strcpy (buf, "stm%?ib\t");
5854 break;
5856 case 3:
5857 strcpy (buf, "stm%?da\t");
5858 break;
5860 case 4:
5861 strcpy (buf, "stm%?db\t");
5862 break;
5864 default:
5865 abort ();
5868 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5869 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5871 for (i = 1; i < nops; i++)
5872 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5873 reg_names[regs[i]]);
5875 strcat (buf, "}\t%@ phole stm");
5877 output_asm_insn (buf, operands);
5878 return "";
5882 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5884 if (GET_CODE (op) != PARALLEL
5885 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5886 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5887 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5888 return 0;
5890 return 1;
5893 /* Routines for use in generating RTL. */
5896 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5897 int write_back, int unchanging_p, int in_struct_p,
5898 int scalar_p)
5900 int i = 0, j;
5901 rtx result;
5902 int sign = up ? 1 : -1;
5903 rtx mem;
5905 /* XScale has load-store double instructions, but they have stricter
5906 alignment requirements than load-store multiple, so we cannot
5907 use them.
5909 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5910 the pipeline until completion.
5912 NREGS CYCLES
5913 1 3
5914 2 4
5915 3 5
5916 4 6
5918 An ldr instruction takes 1-3 cycles, but does not block the
5919 pipeline.
5921 NREGS CYCLES
5922 1 1-3
5923 2 2-6
5924 3 3-9
5925 4 4-12
5927 Best case ldr will always win. However, the more ldr instructions
5928 we issue, the less likely we are to be able to schedule them well.
5929 Using ldr instructions also increases code size.
5931 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5932 for counts of 3 or 4 regs. */
5933 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5935 rtx seq;
5937 start_sequence ();
5939 for (i = 0; i < count; i++)
5941 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5942 MEM_READONLY_P (mem) = unchanging_p;
5943 MEM_IN_STRUCT_P (mem) = in_struct_p;
5944 MEM_SCALAR_P (mem) = scalar_p;
5945 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5948 if (write_back)
5949 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5951 seq = get_insns ();
5952 end_sequence ();
5954 return seq;
5957 result = gen_rtx_PARALLEL (VOIDmode,
5958 rtvec_alloc (count + (write_back ? 1 : 0)));
5959 if (write_back)
5961 XVECEXP (result, 0, 0)
5962 = gen_rtx_SET (GET_MODE (from), from,
5963 plus_constant (from, count * 4 * sign));
5964 i = 1;
5965 count++;
5968 for (j = 0; i < count; i++, j++)
5970 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5971 MEM_READONLY_P (mem) = unchanging_p;
5972 MEM_IN_STRUCT_P (mem) = in_struct_p;
5973 MEM_SCALAR_P (mem) = scalar_p;
5974 XVECEXP (result, 0, i)
5975 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5978 return result;
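
/* [Editor's note] The compromise described above, as a standalone
   predicate (editorial names, not from arm.c): prefer individual
   loads/stores on XScale for 1 or 2 registers, where ldr's 1-3
   non-blocking cycles beat ldm's blocking 2 + NREGS, unless we are
   optimizing for size.  */

static int
prefer_separate_transfers_p (int count, int tune_xscale,
                             int optimizing_for_size)
{
  return tune_xscale && count <= 2 && !optimizing_for_size;
}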
5982 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5983 int write_back, int unchanging_p, int in_struct_p,
5984 int scalar_p)
5986 int i = 0, j;
5987 rtx result;
5988 int sign = up ? 1 : -1;
5989 rtx mem;
5991 /* See arm_gen_load_multiple for discussion of
5992 the pros/cons of ldm/stm usage for XScale. */
5993 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5995 rtx seq;
5997 start_sequence ();
5999 for (i = 0; i < count; i++)
6001 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
6002 MEM_READONLY_P (mem) = unchanging_p;
6003 MEM_IN_STRUCT_P (mem) = in_struct_p;
6004 MEM_SCALAR_P (mem) = scalar_p;
6005 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6008 if (write_back)
6009 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6011 seq = get_insns ();
6012 end_sequence ();
6014 return seq;
6017 result = gen_rtx_PARALLEL (VOIDmode,
6018 rtvec_alloc (count + (write_back ? 1 : 0)));
6019 if (write_back)
6021 XVECEXP (result, 0, 0)
6022 = gen_rtx_SET (GET_MODE (to), to,
6023 plus_constant (to, count * 4 * sign));
6024 i = 1;
6025 count++;
6028 for (j = 0; i < count; i++, j++)
6030 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
6031 MEM_READONLY_P (mem) = unchanging_p;
6032 MEM_IN_STRUCT_P (mem) = in_struct_p;
6033 MEM_SCALAR_P (mem) = scalar_p;
6035 XVECEXP (result, 0, i)
6036 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6039 return result;
6043 arm_gen_movmemqi (rtx *operands)
6045 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6046 int i;
6047 rtx src, dst;
6048 rtx st_src, st_dst, fin_src, fin_dst;
6049 rtx part_bytes_reg = NULL;
6050 rtx mem;
6051 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
6052 int dst_scalar_p, src_scalar_p;
6054 if (GET_CODE (operands[2]) != CONST_INT
6055 || GET_CODE (operands[3]) != CONST_INT
6056 || INTVAL (operands[2]) > 64
6057 || INTVAL (operands[3]) & 3)
6058 return 0;
6060 st_dst = XEXP (operands[0], 0);
6061 st_src = XEXP (operands[1], 0);
6063 dst_unchanging_p = MEM_READONLY_P (operands[0]);
6064 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
6065 dst_scalar_p = MEM_SCALAR_P (operands[0]);
6066 src_unchanging_p = MEM_READONLY_P (operands[1]);
6067 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
6068 src_scalar_p = MEM_SCALAR_P (operands[1]);
6070 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
6071 fin_src = src = copy_to_mode_reg (SImode, st_src);
6073 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6074 out_words_to_go = INTVAL (operands[2]) / 4;
6075 last_bytes = INTVAL (operands[2]) & 3;
6077 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6078 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6080 for (i = 0; in_words_to_go >= 2; i+=4)
6082 if (in_words_to_go > 4)
6083 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6084 src_unchanging_p,
6085 src_in_struct_p,
6086 src_scalar_p));
6087 else
6088 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6089 FALSE, src_unchanging_p,
6090 src_in_struct_p, src_scalar_p));
6092 if (out_words_to_go)
6094 if (out_words_to_go > 4)
6095 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6096 dst_unchanging_p,
6097 dst_in_struct_p,
6098 dst_scalar_p));
6099 else if (out_words_to_go != 1)
6100 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6101 dst, TRUE,
6102 (last_bytes == 0
6103 ? FALSE : TRUE),
6104 dst_unchanging_p,
6105 dst_in_struct_p,
6106 dst_scalar_p));
6107 else
6109 mem = gen_rtx_MEM (SImode, dst);
6110 MEM_READONLY_P (mem) = dst_unchanging_p;
6111 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6112 MEM_SCALAR_P (mem) = dst_scalar_p;
6113 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6114 if (last_bytes != 0)
6115 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6119 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6120 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6123 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6124 if (out_words_to_go)
6126 rtx sreg;
6128 mem = gen_rtx_MEM (SImode, src);
6129 MEM_READONLY_P (mem) = src_unchanging_p;
6130 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6131 MEM_SCALAR_P (mem) = src_scalar_p;
6132 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
6133 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
6135 mem = gen_rtx_MEM (SImode, dst);
6136 MEM_READONLY_P (mem) = dst_unchanging_p;
6137 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6138 MEM_SCALAR_P (mem) = dst_scalar_p;
6139 emit_move_insn (mem, sreg);
6140 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
6141 in_words_to_go--;
6143 if (in_words_to_go) /* Sanity check */
6144 abort ();
6147 if (in_words_to_go)
6149 if (in_words_to_go < 0)
6150 abort ();
6152 mem = gen_rtx_MEM (SImode, src);
6153 MEM_READONLY_P (mem) = src_unchanging_p;
6154 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6155 MEM_SCALAR_P (mem) = src_scalar_p;
6156 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6159 if (last_bytes && part_bytes_reg == NULL)
6160 abort ();
6162 if (BYTES_BIG_ENDIAN && last_bytes)
6164 rtx tmp = gen_reg_rtx (SImode);
6166 /* The bytes we want are in the top end of the word. */
6167 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6168 GEN_INT (8 * (4 - last_bytes))));
6169 part_bytes_reg = tmp;
6171 while (last_bytes)
6173 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
6174 MEM_READONLY_P (mem) = dst_unchanging_p;
6175 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6176 MEM_SCALAR_P (mem) = dst_scalar_p;
6177 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6179 if (--last_bytes)
6181 tmp = gen_reg_rtx (SImode);
6182 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6183 part_bytes_reg = tmp;
6188 else
6190 if (last_bytes > 1)
6192 mem = gen_rtx_MEM (HImode, dst);
6193 MEM_READONLY_P (mem) = dst_unchanging_p;
6194 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6195 MEM_SCALAR_P (mem) = dst_scalar_p;
6196 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6197 last_bytes -= 2;
6198 if (last_bytes)
6200 rtx tmp = gen_reg_rtx (SImode);
6202 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6203 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6204 part_bytes_reg = tmp;
6208 if (last_bytes)
6210 mem = gen_rtx_MEM (QImode, dst);
6211 MEM_READONLY_P (mem) = dst_unchanging_p;
6212 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6213 MEM_SCALAR_P (mem) = dst_scalar_p;
6214 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6218 return 1;
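/* A minimal usage sketch of the expander above (not from the compiler;
   the operand MEMs are invented for illustration): a 14-byte,
   word-aligned block copy. */
#if 0
{
  rtx ops[4];
  ops[0] = dest_mem;       /* MEM for the destination (assumed to exist). */
  ops[1] = src_mem;        /* MEM for the source (assumed to exist). */
  ops[2] = GEN_INT (14);   /* Byte count: must be CONST_INT and <= 64. */
  ops[3] = GEN_INT (4);    /* Alignment: must be a multiple of 4. */
  /* Gives in_words_to_go == 4, out_words_to_go == 3 and last_bytes == 2,
     so on a little-endian target this emits one four-register ldm, one
     three-register stm with write-back on the destination pointer, and
     a trailing strh of the low half of r3 (the part_bytes_reg). */
  arm_gen_movmemqi (ops);
}
#endif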
6221 /* Generate a memory reference for a half word, such that it will be loaded
6222 into the top 16 bits of the word. We can assume that the address is
6223 known to be alignable and of the form reg, or plus (reg, const). */
6226 arm_gen_rotated_half_load (rtx memref)
6228 HOST_WIDE_INT offset = 0;
6229 rtx base = XEXP (memref, 0);
6231 if (GET_CODE (base) == PLUS)
6233 offset = INTVAL (XEXP (base, 1));
6234 base = XEXP (base, 0);
6237 /* If we aren't allowed to generate unaligned addresses, then fail. */
6238 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0))
6239 return NULL;
6241 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6243 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6244 return base;
6246 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
6249 /* Select a dominance comparison mode if possible for a test of the general
6250 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6251 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6252 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6253 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6254 In all cases OP will be either EQ or NE, but we don't need to know which
6255 here. If we are unable to support a dominance comparison we return
6256 CC mode. This will then fail to match for the RTL expressions that
6257 generate this call. */
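/* For example, with X = (eq a b) and Y = (le a b) the EQ test dominates
   (EQ implies LE), so the switch below returns CC_DLEmode; with
   X = (eq a b) and Y = (lt a b) neither condition dominates the other
   and the function falls back to CCmode. */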
6258 enum machine_mode
6259 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6261 enum rtx_code cond1, cond2;
6262 int swapped = 0;
6264 /* Currently we will probably get the wrong result if the individual
6265 comparisons are not simple. This also ensures that it is safe to
6266 reverse a comparison if necessary. */
6267 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6268 != CCmode)
6269 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6270 != CCmode))
6271 return CCmode;
6273 /* The if_then_else variant of this tests the second condition if the
6274 first passes, but is true if the first fails. Reverse the first
6275 condition to get a true "inclusive-or" expression. */
6276 if (cond_or == DOM_CC_NX_OR_Y)
6277 cond1 = reverse_condition (cond1);
6279 /* If the comparisons are not equal, and one doesn't dominate the other,
6280 then we can't do this. */
6281 if (cond1 != cond2
6282 && !comparison_dominates_p (cond1, cond2)
6283 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6284 return CCmode;
6286 if (swapped)
6288 enum rtx_code temp = cond1;
6289 cond1 = cond2;
6290 cond2 = temp;
6293 switch (cond1)
6295 case EQ:
6296 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6297 return CC_DEQmode;
6299 switch (cond2)
6301 case LE: return CC_DLEmode;
6302 case LEU: return CC_DLEUmode;
6303 case GE: return CC_DGEmode;
6304 case GEU: return CC_DGEUmode;
6305 default: break;
6308 break;
6310 case LT:
6311 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6312 return CC_DLTmode;
6313 if (cond2 == LE)
6314 return CC_DLEmode;
6315 if (cond2 == NE)
6316 return CC_DNEmode;
6317 break;
6319 case GT:
6320 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6321 return CC_DGTmode;
6322 if (cond2 == GE)
6323 return CC_DGEmode;
6324 if (cond2 == NE)
6325 return CC_DNEmode;
6326 break;
6328 case LTU:
6329 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6330 return CC_DLTUmode;
6331 if (cond2 == LEU)
6332 return CC_DLEUmode;
6333 if (cond2 == NE)
6334 return CC_DNEmode;
6335 break;
6337 case GTU:
6338 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6339 return CC_DGTUmode;
6340 if (cond2 == GEU)
6341 return CC_DGEUmode;
6342 if (cond2 == NE)
6343 return CC_DNEmode;
6344 break;
6346 /* The remaining cases only occur when both comparisons are the
6347 same. */
6348 case NE:
6349 return CC_DNEmode;
6351 case LE:
6352 return CC_DLEmode;
6354 case GE:
6355 return CC_DGEmode;
6357 case LEU:
6358 return CC_DLEUmode;
6360 case GEU:
6361 return CC_DGEUmode;
6363 default:
6364 break;
6367 abort ();
6370 enum machine_mode
6371 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6373 /* All floating point compares return CCFP if it is an equality
6374 comparison, and CCFPE otherwise. */
6375 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6377 switch (op)
6379 case EQ:
6380 case NE:
6381 case UNORDERED:
6382 case ORDERED:
6383 case UNLT:
6384 case UNLE:
6385 case UNGT:
6386 case UNGE:
6387 case UNEQ:
6388 case LTGT:
6389 return CCFPmode;
6391 case LT:
6392 case LE:
6393 case GT:
6394 case GE:
6395 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6396 return CCFPmode;
6397 return CCFPEmode;
6399 default:
6400 abort ();
6404 /* A compare with a shifted operand. Because of canonicalization, the
6405 comparison will have to be swapped when we emit the assembler. */
6406 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6407 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6408 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6409 || GET_CODE (x) == ROTATERT))
6410 return CC_SWPmode;
6412 /* This is a special case that is used by combine to allow a
6413 comparison of a shifted byte load to be split into a zero-extend
6414 followed by a comparison of the shifted integer (only valid for
6415 equalities and unsigned inequalities). */
6416 if (GET_MODE (x) == SImode
6417 && GET_CODE (x) == ASHIFT
6418 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6419 && GET_CODE (XEXP (x, 0)) == SUBREG
6420 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6421 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6422 && (op == EQ || op == NE
6423 || op == GEU || op == GTU || op == LTU || op == LEU)
6424 && GET_CODE (y) == CONST_INT)
6425 return CC_Zmode;
6427 /* A construct for a conditional compare, if the false arm contains
6428 0, then both conditions must be true, otherwise either condition
6429 must be true. Not all conditions are possible, so CCmode is
6430 returned if it can't be done. */
6431 if (GET_CODE (x) == IF_THEN_ELSE
6432 && (XEXP (x, 2) == const0_rtx
6433 || XEXP (x, 2) == const1_rtx)
6434 && COMPARISON_P (XEXP (x, 0))
6435 && COMPARISON_P (XEXP (x, 1)))
6436 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6437 INTVAL (XEXP (x, 2)));
6439 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6440 if (GET_CODE (x) == AND
6441 && COMPARISON_P (XEXP (x, 0))
6442 && COMPARISON_P (XEXP (x, 1)))
6443 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6444 DOM_CC_X_AND_Y);
6446 if (GET_CODE (x) == IOR
6447 && COMPARISON_P (XEXP (x, 0))
6448 && COMPARISON_P (XEXP (x, 1)))
6449 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6450 DOM_CC_X_OR_Y);
6452 /* An operation (on Thumb) where we want to test for a single bit.
6453 This is done by shifting that bit up into the top bit of a
6454 scratch register; we can then branch on the sign bit. */
6455 if (TARGET_THUMB
6456 && GET_MODE (x) == SImode
6457 && (op == EQ || op == NE)
6458 && (GET_CODE (x) == ZERO_EXTRACT))
6459 return CC_Nmode;
6461 /* For an operation that sets the condition codes as a side-effect, the
6462 V flag is not set correctly, so we can only use comparisons where
6463 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6464 instead.) */
6465 if (GET_MODE (x) == SImode
6466 && y == const0_rtx
6467 && (op == EQ || op == NE || op == LT || op == GE)
6468 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6469 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6470 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6471 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6472 || GET_CODE (x) == LSHIFTRT
6473 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6474 || GET_CODE (x) == ROTATERT
6475 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6476 return CC_NOOVmode;
6478 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6479 return CC_Zmode;
6481 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6482 && GET_CODE (x) == PLUS
6483 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6484 return CC_Cmode;
6486 return CCmode;
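/* A couple of concrete cases from the tests above: comparing
   (plus r0 r1) against zero with EQ, NE, LT or GE selects CC_NOOVmode,
   because an ADDS sets N and Z usefully but not V; comparing
   (plus r0 r1) against r1 with LTU or GEU selects CC_Cmode, since
   unsigned overflow of the addition is exactly the carry flag. */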
6489 /* X and Y are two things to compare using CODE. Emit the compare insn and
6490 return the rtx for the condition code register in the proper mode. */
6493 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6495 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6496 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6498 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6499 gen_rtx_COMPARE (mode, x, y)));
6501 return cc_reg;
6504 /* Generate a sequence of insns that will generate the correct return
6505 address mask depending on the physical architecture that the program
6506 is running on. */
6508 arm_gen_return_addr_mask (void)
6510 rtx reg = gen_reg_rtx (Pmode);
6512 emit_insn (gen_return_addr_mask (reg));
6513 return reg;
6516 void
6517 arm_reload_in_hi (rtx *operands)
6519 rtx ref = operands[1];
6520 rtx base, scratch;
6521 HOST_WIDE_INT offset = 0;
6523 if (GET_CODE (ref) == SUBREG)
6525 offset = SUBREG_BYTE (ref);
6526 ref = SUBREG_REG (ref);
6529 if (GET_CODE (ref) == REG)
6531 /* We have a pseudo which has been spilled onto the stack; there
6532 are two cases here: the first where there is a simple
6533 stack-slot replacement and a second where the stack-slot is
6534 out of range, or is used as a subreg. */
6535 if (reg_equiv_mem[REGNO (ref)])
6537 ref = reg_equiv_mem[REGNO (ref)];
6538 base = find_replacement (&XEXP (ref, 0));
6540 else
6541 /* The slot is out of range, or was dressed up in a SUBREG. */
6542 base = reg_equiv_address[REGNO (ref)];
6544 else
6545 base = find_replacement (&XEXP (ref, 0));
6547 /* Handle the case where the address is too complex to be offset by 1. */
6548 if (GET_CODE (base) == MINUS
6549 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6551 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6553 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6554 base = base_plus;
6556 else if (GET_CODE (base) == PLUS)
6558 /* The addend must be CONST_INT, or we would have dealt with it above. */
6559 HOST_WIDE_INT hi, lo;
6561 offset += INTVAL (XEXP (base, 1));
6562 base = XEXP (base, 0);
6564 /* Rework the address into a legal sequence of insns. */
6565 /* Valid range for lo is -4095 -> 4095 */
6566 lo = (offset >= 0
6567 ? (offset & 0xfff)
6568 : -((-offset) & 0xfff));
6570 /* Corner case: if lo is the max offset then we would be out of range
6571 once we have added the additional 1 below, so bump the msb into the
6572 pre-loading insn(s). */
6573 if (lo == 4095)
6574 lo &= 0x7ff;
6576 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6577 ^ (HOST_WIDE_INT) 0x80000000)
6578 - (HOST_WIDE_INT) 0x80000000);
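/* Example: offset == 4095 triggers the corner case above, leaving
   lo == 2047 and hi == 2048, so both offset and offset + 1 remain
   addressable from BASE + hi. The xor/subtract pair merely
   sign-extends bit 31 of (offset - lo) when HOST_WIDE_INT is wider
   than 32 bits. */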
6580 if (hi + lo != offset)
6581 abort ();
6583 if (hi != 0)
6585 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6587 /* Get the base address; addsi3 knows how to handle constants
6588 that require more than one insn. */
6589 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6590 base = base_plus;
6591 offset = lo;
6595 /* Operands[2] may overlap operands[0] (though it won't overlap
6596 operands[1]), that's why we asked for a DImode reg -- so we can
6597 use the bit that does not overlap. */
6598 if (REGNO (operands[2]) == REGNO (operands[0]))
6599 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6600 else
6601 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6603 emit_insn (gen_zero_extendqisi2 (scratch,
6604 gen_rtx_MEM (QImode,
6605 plus_constant (base,
6606 offset))));
6607 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6608 gen_rtx_MEM (QImode,
6609 plus_constant (base,
6610 offset + 1))));
6611 if (!BYTES_BIG_ENDIAN)
6612 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6613 gen_rtx_IOR (SImode,
6614 gen_rtx_ASHIFT
6615 (SImode,
6616 gen_rtx_SUBREG (SImode, operands[0], 0),
6617 GEN_INT (8)),
6618 scratch)));
6619 else
6620 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6621 gen_rtx_IOR (SImode,
6622 gen_rtx_ASHIFT (SImode, scratch,
6623 GEN_INT (8)),
6624 gen_rtx_SUBREG (SImode, operands[0],
6625 0))));
6628 /* Handle storing a half-word to memory during reload by synthesizing as two
6629 byte stores. Take care not to clobber the input values until after we
6630 have moved them somewhere safe. This code assumes that if the DImode
6631 scratch in operands[2] overlaps either the input value or output address
6632 in some way, then that value must die in this insn (we absolutely need
6633 two scratch registers for some corner cases). */
6634 void
6635 arm_reload_out_hi (rtx *operands)
6637 rtx ref = operands[0];
6638 rtx outval = operands[1];
6639 rtx base, scratch;
6640 HOST_WIDE_INT offset = 0;
6642 if (GET_CODE (ref) == SUBREG)
6644 offset = SUBREG_BYTE (ref);
6645 ref = SUBREG_REG (ref);
6648 if (GET_CODE (ref) == REG)
6650 /* We have a pseudo which has been spilled onto the stack; there
6651 are two cases here: the first where there is a simple
6652 stack-slot replacement and a second where the stack-slot is
6653 out of range, or is used as a subreg. */
6654 if (reg_equiv_mem[REGNO (ref)])
6656 ref = reg_equiv_mem[REGNO (ref)];
6657 base = find_replacement (&XEXP (ref, 0));
6659 else
6660 /* The slot is out of range, or was dressed up in a SUBREG. */
6661 base = reg_equiv_address[REGNO (ref)];
6663 else
6664 base = find_replacement (&XEXP (ref, 0));
6666 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6668 /* Handle the case where the address is too complex to be offset by 1. */
6669 if (GET_CODE (base) == MINUS
6670 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6672 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6674 /* Be careful not to destroy OUTVAL. */
6675 if (reg_overlap_mentioned_p (base_plus, outval))
6677 /* Updating base_plus might destroy outval, see if we can
6678 swap the scratch and base_plus. */
6679 if (!reg_overlap_mentioned_p (scratch, outval))
6681 rtx tmp = scratch;
6682 scratch = base_plus;
6683 base_plus = tmp;
6685 else
6687 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6689 /* Be conservative and copy OUTVAL into the scratch now,
6690 this should only be necessary if outval is a subreg
6691 of something larger than a word. */
6692 /* XXX Might this clobber base? I can't see how it can,
6693 since scratch is known to overlap with OUTVAL, and
6694 must be wider than a word. */
6695 emit_insn (gen_movhi (scratch_hi, outval));
6696 outval = scratch_hi;
6700 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6701 base = base_plus;
6703 else if (GET_CODE (base) == PLUS)
6705 /* The addend must be CONST_INT, or we would have dealt with it above. */
6706 HOST_WIDE_INT hi, lo;
6708 offset += INTVAL (XEXP (base, 1));
6709 base = XEXP (base, 0);
6711 /* Rework the address into a legal sequence of insns. */
6712 /* Valid range for lo is -4095 -> 4095 */
6713 lo = (offset >= 0
6714 ? (offset & 0xfff)
6715 : -((-offset) & 0xfff));
6717 /* Corner case: if lo is the max offset then we would be out of range
6718 once we have added the additional 1 below, so bump the msb into the
6719 pre-loading insn(s). */
6720 if (lo == 4095)
6721 lo &= 0x7ff;
6723 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6724 ^ (HOST_WIDE_INT) 0x80000000)
6725 - (HOST_WIDE_INT) 0x80000000);
6727 if (hi + lo != offset)
6728 abort ();
6730 if (hi != 0)
6732 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6734 /* Be careful not to destroy OUTVAL. */
6735 if (reg_overlap_mentioned_p (base_plus, outval))
6737 /* Updating base_plus might destroy outval, see if we
6738 can swap the scratch and base_plus. */
6739 if (!reg_overlap_mentioned_p (scratch, outval))
6741 rtx tmp = scratch;
6742 scratch = base_plus;
6743 base_plus = tmp;
6745 else
6747 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6749 /* Be conservative and copy outval into scratch now,
6750 this should only be necessary if outval is a
6751 subreg of something larger than a word. */
6752 /* XXX Might this clobber base? I can't see how it
6753 can, since scratch is known to overlap with
6754 outval. */
6755 emit_insn (gen_movhi (scratch_hi, outval));
6756 outval = scratch_hi;
6760 /* Get the base address; addsi3 knows how to handle constants
6761 that require more than one insn. */
6762 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6763 base = base_plus;
6764 offset = lo;
6768 if (BYTES_BIG_ENDIAN)
6770 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6771 plus_constant (base, offset + 1)),
6772 gen_lowpart (QImode, outval)));
6773 emit_insn (gen_lshrsi3 (scratch,
6774 gen_rtx_SUBREG (SImode, outval, 0),
6775 GEN_INT (8)));
6776 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6777 gen_lowpart (QImode, scratch)));
6779 else
6781 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6782 gen_lowpart (QImode, outval)));
6783 emit_insn (gen_lshrsi3 (scratch,
6784 gen_rtx_SUBREG (SImode, outval, 0),
6785 GEN_INT (8)));
6786 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6787 plus_constant (base, offset + 1)),
6788 gen_lowpart (QImode, scratch)));
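/* On a little-endian target, storing the halfword 0x1234 therefore
   becomes a strb of 0x34 at BASE + OFFSET, a shift right by 8 into
   SCRATCH, and a strb of 0x12 at BASE + OFFSET + 1; big-endian targets
   store the two bytes in the opposite order, as above. */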
6792 /* Print a symbolic form of X to the debug file, F. */
6793 static void
6794 arm_print_value (FILE *f, rtx x)
6796 switch (GET_CODE (x))
6798 case CONST_INT:
6799 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6800 return;
6802 case CONST_DOUBLE:
6803 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6804 return;
6806 case CONST_VECTOR:
6808 int i;
6810 fprintf (f, "<");
6811 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6813 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6814 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6815 fputc (',', f);
6817 fprintf (f, ">");
6819 return;
6821 case CONST_STRING:
6822 fprintf (f, "\"%s\"", XSTR (x, 0));
6823 return;
6825 case SYMBOL_REF:
6826 fprintf (f, "`%s'", XSTR (x, 0));
6827 return;
6829 case LABEL_REF:
6830 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6831 return;
6833 case CONST:
6834 arm_print_value (f, XEXP (x, 0));
6835 return;
6837 case PLUS:
6838 arm_print_value (f, XEXP (x, 0));
6839 fprintf (f, "+");
6840 arm_print_value (f, XEXP (x, 1));
6841 return;
6843 case PC:
6844 fprintf (f, "pc");
6845 return;
6847 default:
6848 fprintf (f, "????");
6849 return;
6853 /* Routines for manipulation of the constant pool. */
6855 /* Arm instructions cannot load a large constant directly into a
6856 register; they have to come from a pc relative load. The constant
6857 must therefore be placed in the addressable range of the pc
6858 relative load. Depending on the precise pc relative load
6859 instruction the range is somewhere between 256 bytes and 4k. This
6860 means that we often have to dump a constant inside a function, and
6861 generate code to branch around it.
6863 It is important to minimize this, since the branches will slow
6864 things down and make the code larger.
6866 Normally we can hide the table after an existing unconditional
6867 branch so that there is no interruption of the flow, but in the
6868 worst case the code looks like this:
6870 ldr rn, L1
6872 b L2
6873 align
6874 L1: .long value
6878 ldr rn, L3
6880 b L4
6881 align
6882 L3: .long value
6886 We fix this by performing a scan after scheduling, which notices
6887 which instructions need to have their operands fetched from the
6888 constant table and builds the table.
6890 The algorithm starts by building a table of all the constants that
6891 need fixing up and all the natural barriers in the function (places
6892 where a constant table can be dropped without breaking the flow).
6893 For each fixup we note how far the pc-relative replacement will be
6894 able to reach and the offset of the instruction into the function.
6896 Having built the table we then group the fixes together to form
6897 tables that are as large as possible (subject to addressing
6898 constraints) and emit each table of constants after the last
6899 barrier that is within range of all the instructions in the group.
6900 If a group does not contain a barrier, then we forcibly create one
6901 by inserting a jump instruction into the flow. Once the table has
6902 been inserted, the insns are then modified to reference the
6903 relevant entry in the pool.
6905 Possible enhancements to the algorithm (not implemented) are:
6907 1) For some processors and object formats, there may be benefit in
6908 aligning the pools to the start of cache lines; this alignment
6909 would need to be taken into account when calculating addressability
6910 of a pool. */
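/* As a worked example (addresses invented for illustration): two fixups
   at 0x100 and 0x900 that can each reach 4k forwards can share one pool
   placed behind any natural barrier between 0x900 and 0x1100. If no
   such barrier exists, create_fix_barrier () below manufactures one by
   emitting a branch over the spot where the pool will be dumped. */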
6912 /* These typedefs are located at the start of this file, so that
6913 they can be used in the prototypes there. This comment is to
6914 remind readers of that fact so that the following structures
6915 can be understood more easily.
6917 typedef struct minipool_node Mnode;
6918 typedef struct minipool_fixup Mfix; */
6920 struct minipool_node
6922 /* Doubly linked chain of entries. */
6923 Mnode * next;
6924 Mnode * prev;
6925 /* The maximum offset into the code at which this entry can be placed. While
6926 pushing fixes for forward references, all entries are sorted in order
6927 of increasing max_address. */
6928 HOST_WIDE_INT max_address;
6929 /* Similarly for an entry inserted for a backwards ref. */
6930 HOST_WIDE_INT min_address;
6931 /* The number of fixes referencing this entry. This can become zero
6932 if we "unpush" an entry. In this case we ignore the entry when we
6933 come to emit the code. */
6934 int refcount;
6935 /* The offset from the start of the minipool. */
6936 HOST_WIDE_INT offset;
6938 /* The value in the table. */
6938 rtx value;
6939 /* The mode of value. */
6940 enum machine_mode mode;
6941 /* The size of the value. With iWMMXt enabled
6942 sizes > 4 also imply an alignment of 8 bytes. */
6943 int fix_size;
6946 struct minipool_fixup
6948 Mfix * next; /* Chain of all fixes, in order of increasing address. */
6949 rtx insn; /* The insn that needs fixing up. */
6950 HOST_WIDE_INT address; /* Offset of the insn from the start of the function. */
6951 rtx * loc; /* Pointer to the operand within INSN that needs fixing. */
6952 enum machine_mode mode; /* Mode of the value to be loaded. */
6953 int fix_size; /* Its size in the pool, from MINIPOOL_FIX_SIZE. */
6954 rtx value; /* The constant that must be loaded. */
6955 Mnode * minipool; /* The pool entry serving this fix, once assigned. */
6956 HOST_WIDE_INT forwards; /* Forward reach of the pc-relative load, in bytes. */
6957 HOST_WIDE_INT backwards; /* Backward reach of the pc-relative load, in bytes. */
6960 /* Fixes less than a word need padding out to a word boundary. */
6961 #define MINIPOOL_FIX_SIZE(mode) \
6962 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
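/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   both evaluate to 4, while MINIPOOL_FIX_SIZE (DImode) is 8. */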
6964 static Mnode * minipool_vector_head;
6965 static Mnode * minipool_vector_tail;
6966 static rtx minipool_vector_label;
6968 /* The linked list of all minipool fixes required for this function. */
6969 Mfix * minipool_fix_head;
6970 Mfix * minipool_fix_tail;
6971 /* The fix entry for the current minipool, once it has been placed. */
6972 Mfix * minipool_barrier;
6974 /* Determines if INSN is the start of a jump table. Returns the end
6975 of the TABLE or NULL_RTX. */
6976 static rtx
6977 is_jump_table (rtx insn)
6979 rtx table;
6981 if (GET_CODE (insn) == JUMP_INSN
6982 && JUMP_LABEL (insn) != NULL
6983 && ((table = next_real_insn (JUMP_LABEL (insn)))
6984 == next_real_insn (insn))
6985 && table != NULL
6986 && GET_CODE (table) == JUMP_INSN
6987 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6988 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6989 return table;
6991 return NULL_RTX;
6994 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6995 #define JUMP_TABLES_IN_TEXT_SECTION 0
6996 #endif
6998 static HOST_WIDE_INT
6999 get_jump_table_size (rtx insn)
7001 /* ADDR_VECs only take room if read-only data goes into the text
7002 section. */
7003 if (JUMP_TABLES_IN_TEXT_SECTION
7004 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
7005 || 1
7006 #endif
7009 rtx body = PATTERN (insn);
7010 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7012 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
7015 return 0;
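/* For example, an ADDR_DIFF_VEC in HImode with ten entries occupies
   2 * 10 == 20 bytes when jump tables live in the text section, and
   contributes nothing to the address calculation otherwise. */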
7018 /* Move a minipool fix MP from its current location to before MAX_MP.
7019 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7020 constraints may need updating. */
7021 static Mnode *
7022 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7023 HOST_WIDE_INT max_address)
7025 /* This should never be true and the code below assumes these are
7026 different. */
7027 if (mp == max_mp)
7028 abort ();
7030 if (max_mp == NULL)
7032 if (max_address < mp->max_address)
7033 mp->max_address = max_address;
7035 else
7037 if (max_address > max_mp->max_address - mp->fix_size)
7038 mp->max_address = max_mp->max_address - mp->fix_size;
7039 else
7040 mp->max_address = max_address;
7042 /* Unlink MP from its current position. Since max_mp is non-null,
7043 mp->prev must be non-null. */
7044 mp->prev->next = mp->next;
7045 if (mp->next != NULL)
7046 mp->next->prev = mp->prev;
7047 else
7048 minipool_vector_tail = mp->prev;
7050 /* Re-insert it before MAX_MP. */
7051 mp->next = max_mp;
7052 mp->prev = max_mp->prev;
7053 max_mp->prev = mp;
7055 if (mp->prev != NULL)
7056 mp->prev->next = mp;
7057 else
7058 minipool_vector_head = mp;
7061 /* Save the new entry. */
7062 max_mp = mp;
7064 /* Scan over the preceding entries and adjust their addresses as
7065 required. */
7066 while (mp->prev != NULL
7067 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7069 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7070 mp = mp->prev;
7073 return max_mp;
7076 /* Add a constant to the minipool for a forward reference. Returns the
7077 node added or NULL if the constant will not fit in this pool. */
7078 static Mnode *
7079 add_minipool_forward_ref (Mfix *fix)
7081 /* If set, max_mp is the first pool_entry that has a lower
7082 constraint than the one we are trying to add. */
7083 Mnode * max_mp = NULL;
7084 HOST_WIDE_INT max_address = fix->address + fix->forwards;
7085 Mnode * mp;
7087 /* If this fix's address is greater than the address of the first
7088 entry, then we can't put the fix in this pool. We subtract the
7089 size of the current fix to ensure that if the table is fully
7090 packed we still have enough room to insert this value by shuffling
7091 the other fixes forwards. */
7092 if (minipool_vector_head
7093 && fix->address >= minipool_vector_head->max_address - fix->fix_size)
7094 return NULL;
7096 /* Scan the pool to see if a constant with the same value has
7097 already been added. While we are doing this, also note the
7098 location where we must insert the constant if it doesn't already
7099 exist. */
7100 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7102 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7103 && fix->mode == mp->mode
7104 && (GET_CODE (fix->value) != CODE_LABEL
7105 || (CODE_LABEL_NUMBER (fix->value)
7106 == CODE_LABEL_NUMBER (mp->value)))
7107 && rtx_equal_p (fix->value, mp->value))
7109 /* More than one fix references this entry. */
7110 mp->refcount++;
7111 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7114 /* Note the insertion point if necessary. */
7115 if (max_mp == NULL
7116 && mp->max_address > max_address)
7117 max_mp = mp;
7119 /* If we are inserting an 8-byte aligned quantity and
7120 we have not already found an insertion point, then
7121 make sure that all such 8-byte aligned quantities are
7122 placed at the start of the pool. */
7123 if (ARM_DOUBLEWORD_ALIGN
7124 && max_mp == NULL
7125 && fix->fix_size == 8
7126 && mp->fix_size != 8)
7128 max_mp = mp;
7129 max_address = mp->max_address;
7133 /* The value is not currently in the minipool, so we need to create
7134 a new entry for it. If MAX_MP is NULL, the entry will be put on
7135 the end of the list since the placement is less constrained than
7136 any existing entry. Otherwise, we insert the new fix before
7137 MAX_MP and, if necessary, adjust the constraints on the other
7138 entries. */
7139 mp = xmalloc (sizeof (* mp));
7140 mp->fix_size = fix->fix_size;
7141 mp->mode = fix->mode;
7142 mp->value = fix->value;
7143 mp->refcount = 1;
7144 /* Not yet required for a backwards ref. */
7145 mp->min_address = -65536;
7147 if (max_mp == NULL)
7149 mp->max_address = max_address;
7150 mp->next = NULL;
7151 mp->prev = minipool_vector_tail;
7153 if (mp->prev == NULL)
7155 minipool_vector_head = mp;
7156 minipool_vector_label = gen_label_rtx ();
7158 else
7159 mp->prev->next = mp;
7161 minipool_vector_tail = mp;
7163 else
7165 if (max_address > max_mp->max_address - mp->fix_size)
7166 mp->max_address = max_mp->max_address - mp->fix_size;
7167 else
7168 mp->max_address = max_address;
7170 mp->next = max_mp;
7171 mp->prev = max_mp->prev;
7172 max_mp->prev = mp;
7173 if (mp->prev != NULL)
7174 mp->prev->next = mp;
7175 else
7176 minipool_vector_head = mp;
7179 /* Save the new entry. */
7180 max_mp = mp;
7182 /* Scan over the preceding entries and adjust their addresses as
7183 required. */
7184 while (mp->prev != NULL
7185 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7187 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7188 mp = mp->prev;
7191 return max_mp;
7194 static Mnode *
7195 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7196 HOST_WIDE_INT min_address)
7198 HOST_WIDE_INT offset;
7200 /* This should never be true, and the code below assumes these are
7201 different. */
7202 if (mp == min_mp)
7203 abort ();
7205 if (min_mp == NULL)
7207 if (min_address > mp->min_address)
7208 mp->min_address = min_address;
7210 else
7212 /* We will adjust this below if it is too loose. */
7213 mp->min_address = min_address;
7215 /* Unlink MP from its current position. Since min_mp is non-null,
7216 mp->next must be non-null. */
7217 mp->next->prev = mp->prev;
7218 if (mp->prev != NULL)
7219 mp->prev->next = mp->next;
7220 else
7221 minipool_vector_head = mp->next;
7223 /* Reinsert it after MIN_MP. */
7224 mp->prev = min_mp;
7225 mp->next = min_mp->next;
7226 min_mp->next = mp;
7227 if (mp->next != NULL)
7228 mp->next->prev = mp;
7229 else
7230 minipool_vector_tail = mp;
7233 min_mp = mp;
7235 offset = 0;
7236 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7238 mp->offset = offset;
7239 if (mp->refcount > 0)
7240 offset += mp->fix_size;
7242 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7243 mp->next->min_address = mp->min_address + mp->fix_size;
7246 return min_mp;
7249 /* Add a constant to the minipool for a backward reference. Returns the
7250 node added or NULL if the constant will not fit in this pool.
7252 Note that the code for insertion of a backwards reference can be
7253 somewhat confusing because the calculated offsets for each fix do
7254 not take into account the size of the pool (which is still under
7255 construction). */
7256 static Mnode *
7257 add_minipool_backward_ref (Mfix *fix)
7259 /* If set, min_mp is the last pool_entry that has a lower constraint
7260 than the one we are trying to add. */
7261 Mnode *min_mp = NULL;
7262 /* This can be negative, since it is only a constraint. */
7263 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7264 Mnode *mp;
7266 /* If we can't reach the current pool from this insn, or if we can't
7267 insert this entry at the end of the pool without pushing other
7268 fixes out of range, then we don't try. This ensures that we
7269 can't fail later on. */
7270 if (min_address >= minipool_barrier->address
7271 || (minipool_vector_tail->min_address + fix->fix_size
7272 >= minipool_barrier->address))
7273 return NULL;
7275 /* Scan the pool to see if a constant with the same value has
7276 already been added. While we are doing this, also note the
7277 location where we must insert the constant if it doesn't already
7278 exist. */
7279 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7281 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7282 && fix->mode == mp->mode
7283 && (GET_CODE (fix->value) != CODE_LABEL
7284 || (CODE_LABEL_NUMBER (fix->value)
7285 == CODE_LABEL_NUMBER (mp->value)))
7286 && rtx_equal_p (fix->value, mp->value)
7287 /* Check that there is enough slack to move this entry to the
7288 end of the table (this is conservative). */
7289 && (mp->max_address
7290 > (minipool_barrier->address
7291 + minipool_vector_tail->offset
7292 + minipool_vector_tail->fix_size)))
7294 mp->refcount++;
7295 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7298 if (min_mp != NULL)
7299 mp->min_address += fix->fix_size;
7300 else
7302 /* Note the insertion point if necessary. */
7303 if (mp->min_address < min_address)
7305 /* For now, we do not allow the insertion of 8-byte alignment
7306 requiring nodes anywhere but at the start of the pool. */
7307 if (ARM_DOUBLEWORD_ALIGN
7308 && fix->fix_size == 8 && mp->fix_size != 8)
7309 return NULL;
7310 else
7311 min_mp = mp;
7313 else if (mp->max_address
7314 < minipool_barrier->address + mp->offset + fix->fix_size)
7316 /* Inserting before this entry would push the fix beyond
7317 its maximum address (which can happen if we have
7318 re-located a forwards fix); force the new fix to come
7319 after it. */
7320 min_mp = mp;
7321 min_address = mp->min_address + fix->fix_size;
7323 /* If we are inserting an 8-byte aligned quantity and
7324 we have not already found an insertion point, then
7325 make sure that all such 8-byte aligned quantities are
7326 placed at the start of the pool. */
7327 else if (ARM_DOUBLEWORD_ALIGN
7328 && min_mp == NULL
7329 && fix->fix_size == 8
7330 && mp->fix_size < 8)
7332 min_mp = mp;
7333 min_address = mp->min_address + fix->fix_size;
7338 /* We need to create a new entry. */
7339 mp = xmalloc (sizeof (* mp));
7340 mp->fix_size = fix->fix_size;
7341 mp->mode = fix->mode;
7342 mp->value = fix->value;
7343 mp->refcount = 1;
7344 mp->max_address = minipool_barrier->address + 65536;
7346 mp->min_address = min_address;
7348 if (min_mp == NULL)
7350 mp->prev = NULL;
7351 mp->next = minipool_vector_head;
7353 if (mp->next == NULL)
7355 minipool_vector_tail = mp;
7356 minipool_vector_label = gen_label_rtx ();
7358 else
7359 mp->next->prev = mp;
7361 minipool_vector_head = mp;
7363 else
7365 mp->next = min_mp->next;
7366 mp->prev = min_mp;
7367 min_mp->next = mp;
7369 if (mp->next != NULL)
7370 mp->next->prev = mp;
7371 else
7372 minipool_vector_tail = mp;
7375 /* Save the new entry. */
7376 min_mp = mp;
7378 if (mp->prev)
7379 mp = mp->prev;
7380 else
7381 mp->offset = 0;
7383 /* Scan over the following entries and adjust their offsets. */
7384 while (mp->next != NULL)
7386 if (mp->next->min_address < mp->min_address + mp->fix_size)
7387 mp->next->min_address = mp->min_address + mp->fix_size;
7389 if (mp->refcount)
7390 mp->next->offset = mp->offset + mp->fix_size;
7391 else
7392 mp->next->offset = mp->offset;
7394 mp = mp->next;
7397 return min_mp;
7400 static void
7401 assign_minipool_offsets (Mfix *barrier)
7403 HOST_WIDE_INT offset = 0;
7404 Mnode *mp;
7406 minipool_barrier = barrier;
7408 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7410 mp->offset = offset;
7412 if (mp->refcount > 0)
7413 offset += mp->fix_size;
7417 /* Output the literal table */
7418 static void
7419 dump_minipool (rtx scan)
7421 Mnode * mp;
7422 Mnode * nmp;
7423 int align64 = 0;
7425 if (ARM_DOUBLEWORD_ALIGN)
7426 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7427 if (mp->refcount > 0 && mp->fix_size == 8)
7429 align64 = 1;
7430 break;
7433 if (dump_file)
7434 fprintf (dump_file,
7435 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7436 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7438 scan = emit_label_after (gen_label_rtx (), scan);
7439 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7440 scan = emit_label_after (minipool_vector_label, scan);
7442 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7444 if (mp->refcount > 0)
7446 if (dump_file)
7448 fprintf (dump_file,
7449 ";; Offset %u, min %ld, max %ld ",
7450 (unsigned) mp->offset, (unsigned long) mp->min_address,
7451 (unsigned long) mp->max_address);
7452 arm_print_value (dump_file, mp->value);
7453 fputc ('\n', dump_file);
7456 switch (mp->fix_size)
7458 #ifdef HAVE_consttable_1
7459 case 1:
7460 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7461 break;
7463 #endif
7464 #ifdef HAVE_consttable_2
7465 case 2:
7466 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7467 break;
7469 #endif
7470 #ifdef HAVE_consttable_4
7471 case 4:
7472 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7473 break;
7475 #endif
7476 #ifdef HAVE_consttable_8
7477 case 8:
7478 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7479 break;
7481 #endif
7482 default:
7483 abort ();
7484 break;
7488 nmp = mp->next;
7489 free (mp);
7492 minipool_vector_head = minipool_vector_tail = NULL;
7493 scan = emit_insn_after (gen_consttable_end (), scan);
7494 scan = emit_barrier_after (scan);
7497 /* Return the cost of forcibly inserting a barrier after INSN. */
7498 static int
7499 arm_barrier_cost (rtx insn)
7501 /* Basing the location of the pool on the loop depth is preferable,
7502 but at the moment, the basic block information seems to be
7503 corrupted by this stage of the compilation. */
7504 int base_cost = 50;
7505 rtx next = next_nonnote_insn (insn);
7507 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7508 base_cost -= 20;
7510 switch (GET_CODE (insn))
7512 case CODE_LABEL:
7513 /* It will always be better to place the table before the label, rather
7514 than after it. */
7515 return 50;
7517 case INSN:
7518 case CALL_INSN:
7519 return base_cost;
7521 case JUMP_INSN:
7522 return base_cost - 10;
7524 default:
7525 return base_cost + 10;
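/* For instance, a JUMP_INSN whose next non-note insn is a CODE_LABEL
   costs 50 - 20 - 10 == 20, making the spot just after an existing
   branch-to-label the preferred place to force a pool barrier. */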
7529 /* Find the best place in the insn stream in the range
7530 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7531 Create the barrier by inserting a jump and add a new fix entry for
7532 it. */
7533 static Mfix *
7534 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7536 HOST_WIDE_INT count = 0;
7537 rtx barrier;
7538 rtx from = fix->insn;
7539 rtx selected = from;
7540 int selected_cost;
7541 HOST_WIDE_INT selected_address;
7542 Mfix * new_fix;
7543 HOST_WIDE_INT max_count = max_address - fix->address;
7544 rtx label = gen_label_rtx ();
7546 selected_cost = arm_barrier_cost (from);
7547 selected_address = fix->address;
7549 while (from && count < max_count)
7551 rtx tmp;
7552 int new_cost;
7554 /* This code shouldn't have been called if there was a natural barrier
7555 within range. */
7556 if (GET_CODE (from) == BARRIER)
7557 abort ();
7559 /* Count the length of this insn. */
7560 count += get_attr_length (from);
7562 /* If there is a jump table, add its length. */
7563 tmp = is_jump_table (from);
7564 if (tmp != NULL)
7566 count += get_jump_table_size (tmp);
7568 /* Jump tables aren't in a basic block, so base the cost on
7569 the dispatch insn. If we select this location, we will
7570 still put the pool after the table. */
7571 new_cost = arm_barrier_cost (from);
7573 if (count < max_count && new_cost <= selected_cost)
7575 selected = tmp;
7576 selected_cost = new_cost;
7577 selected_address = fix->address + count;
7580 /* Continue after the dispatch table. */
7581 from = NEXT_INSN (tmp);
7582 continue;
7585 new_cost = arm_barrier_cost (from);
7587 if (count < max_count && new_cost <= selected_cost)
7589 selected = from;
7590 selected_cost = new_cost;
7591 selected_address = fix->address + count;
7594 from = NEXT_INSN (from);
7597 /* Create a new JUMP_INSN that branches around a barrier. */
7598 from = emit_jump_insn_after (gen_jump (label), selected);
7599 JUMP_LABEL (from) = label;
7600 barrier = emit_barrier_after (from);
7601 emit_label_after (label, barrier);
7603 /* Create a minipool barrier entry for the new barrier. */
7604 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7605 new_fix->insn = barrier;
7606 new_fix->address = selected_address;
7607 new_fix->next = fix->next;
7608 fix->next = new_fix;
7610 return new_fix;
7613 /* Record that there is a natural barrier in the insn stream at
7614 ADDRESS. */
7615 static void
7616 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7618 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7620 fix->insn = insn;
7621 fix->address = address;
7623 fix->next = NULL;
7624 if (minipool_fix_head != NULL)
7625 minipool_fix_tail->next = fix;
7626 else
7627 minipool_fix_head = fix;
7629 minipool_fix_tail = fix;
7632 /* Record INSN, which will need fixing up to load a value from the
7633 minipool. ADDRESS is the offset of the insn since the start of the
7634 function; LOC is a pointer to the part of the insn which requires
7635 fixing; VALUE is the constant that must be loaded, which is of type
7636 MODE. */
7637 static void
7638 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7639 enum machine_mode mode, rtx value)
7641 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7643 #ifdef AOF_ASSEMBLER
7644 /* PIC symbol references need to be converted into offsets into the
7645 based area. */
7646 /* XXX This shouldn't be done here. */
7647 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7648 value = aof_pic_entry (value);
7649 #endif /* AOF_ASSEMBLER */
7651 fix->insn = insn;
7652 fix->address = address;
7653 fix->loc = loc;
7654 fix->mode = mode;
7655 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7656 fix->value = value;
7657 fix->forwards = get_attr_pool_range (insn);
7658 fix->backwards = get_attr_neg_pool_range (insn);
7659 fix->minipool = NULL;
7661 /* If an insn doesn't have a range defined for it, then it isn't
7662 expecting to be reworked by this code. Better to abort now than
7663 to generate duff assembly code. */
7664 if (fix->forwards == 0 && fix->backwards == 0)
7665 abort ();
7667 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7668 So there might be an empty word before the start of the pool.
7669 Hence we reduce the forward range by 4 to allow for this
7670 possibility. */
7671 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7672 fix->forwards -= 4;
7674 if (dump_file)
7676 fprintf (dump_file,
7677 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7678 GET_MODE_NAME (mode),
7679 INSN_UID (insn), (unsigned long) address,
7680 -1 * (long)fix->backwards, (long)fix->forwards);
7681 arm_print_value (dump_file, fix->value);
7682 fprintf (dump_file, "\n");
7685 /* Add it to the chain of fixes. */
7686 fix->next = NULL;
7688 if (minipool_fix_head != NULL)
7689 minipool_fix_tail->next = fix;
7690 else
7691 minipool_fix_head = fix;
7693 minipool_fix_tail = fix;
7696 /* Scan INSN and note any of its operands that need fixing.
7697 If DO_PUSHES is false we do not actually push any of the fixups
7698 needed. The function returns TRUE if any fixups were needed/pushed.
7699 This is used by arm_memory_load_p() which needs to know about loads
7700 of constants that will be converted into minipool loads. */
7701 static bool
7702 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7704 bool result = false;
7705 int opno;
7707 extract_insn (insn);
7709 if (!constrain_operands (1))
7710 fatal_insn_not_found (insn);
7712 if (recog_data.n_alternatives == 0)
7713 return false;
7715 /* Fill in recog_op_alt with information about the constraints of this insn. */
7716 preprocess_constraints ();
7718 for (opno = 0; opno < recog_data.n_operands; opno++)
7720 /* Things we need to fix can only occur in inputs. */
7721 if (recog_data.operand_type[opno] != OP_IN)
7722 continue;
7724 /* If this alternative is a memory reference, then any mention
7725 of constants in this alternative is really to fool reload
7726 into allowing us to accept one there. We need to fix them up
7727 now so that we output the right code. */
7728 if (recog_op_alt[opno][which_alternative].memory_ok)
7730 rtx op = recog_data.operand[opno];
7732 if (CONSTANT_P (op))
7734 if (do_pushes)
7735 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7736 recog_data.operand_mode[opno], op);
7737 result = true;
7739 else if (GET_CODE (op) == MEM
7740 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7741 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7743 if (do_pushes)
7745 rtx cop = avoid_constant_pool_reference (op);
7747 /* Casting the address of something to a mode narrower
7748 than a word can cause avoid_constant_pool_reference()
7749 to return the pool reference itself. That's no good to
7750 us here. Let's just hope that we can use the
7751 constant pool value directly. */
7752 if (op == cop)
7753 cop = get_pool_constant (XEXP (op, 0));
7755 push_minipool_fix (insn, address,
7756 recog_data.operand_loc[opno],
7757 recog_data.operand_mode[opno], cop);
7760 result = true;
7765 return result;
7768 /* GCC puts the pool in the wrong place for ARM, since we can only
7769 load addresses within a limited distance of the pc. We do some
7770 special munging to move the constant pool values to the correct
7771 point in the code. */
7772 static void
7773 arm_reorg (void)
7775 rtx insn;
7776 HOST_WIDE_INT address = 0;
7777 Mfix * fix;
7779 minipool_fix_head = minipool_fix_tail = NULL;
7781 /* The first insn must always be a note, or the code below won't
7782 scan it properly. */
7783 insn = get_insns ();
7784 if (GET_CODE (insn) != NOTE)
7785 abort ();
7787 /* Scan all the insns and record the operands that will need fixing. */
7788 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7790 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7791 && (arm_cirrus_insn_p (insn)
7792 || GET_CODE (insn) == JUMP_INSN
7793 || arm_memory_load_p (insn)))
7794 cirrus_reorg (insn);
7796 if (GET_CODE (insn) == BARRIER)
7797 push_minipool_barrier (insn, address);
7798 else if (INSN_P (insn))
7800 rtx table;
7802 note_invalid_constants (insn, address, true);
7803 address += get_attr_length (insn);
7805 /* If the insn is a vector jump, add the size of the table
7806 and skip the table. */
7807 if ((table = is_jump_table (insn)) != NULL)
7809 address += get_jump_table_size (table);
7810 insn = table;
7815 fix = minipool_fix_head;
7817 /* Now scan the fixups and perform the required changes. */
7818 while (fix)
7820 Mfix * ftmp;
7821 Mfix * fdel;
7822 Mfix * last_added_fix;
7823 Mfix * last_barrier = NULL;
7824 Mfix * this_fix;
7826 /* Skip any further barriers before the next fix. */
7827 while (fix && GET_CODE (fix->insn) == BARRIER)
7828 fix = fix->next;
7830 /* No more fixes. */
7831 if (fix == NULL)
7832 break;
7834 last_added_fix = NULL;
7836 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7838 if (GET_CODE (ftmp->insn) == BARRIER)
7840 if (ftmp->address >= minipool_vector_head->max_address)
7841 break;
7843 last_barrier = ftmp;
7845 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7846 break;
7848 last_added_fix = ftmp; /* Keep track of the last fix added. */
7851 /* If we found a barrier, drop back to that; any fixes that we
7852 could have reached but come after the barrier will now go in
7853 the next mini-pool. */
7854 if (last_barrier != NULL)
7856 /* Reduce the refcount for those fixes that won't go into this
7857 pool after all. */
7858 for (fdel = last_barrier->next;
7859 fdel && fdel != ftmp;
7860 fdel = fdel->next)
7862 fdel->minipool->refcount--;
7863 fdel->minipool = NULL;
7866 ftmp = last_barrier;
7868 else
7870 /* ftmp is the first fix that we can't fit into this pool and
7871 there are no natural barriers that we could use. Insert a
7872 new barrier in the code somewhere between the previous
7873 fix and this one, and arrange to jump around it. */
7874 HOST_WIDE_INT max_address;
7876 /* The last item on the list of fixes must be a barrier, so
7877 we can never run off the end of the list of fixes without
7878 last_barrier being set. */
7879 if (ftmp == NULL)
7880 abort ();
7882 max_address = minipool_vector_head->max_address;
7883 /* Check that there isn't another fix that is in range that
7884 we couldn't fit into this pool because the pool was
7885 already too large: we need to put the pool before such an
7886 instruction. */
7887 if (ftmp->address < max_address)
7888 max_address = ftmp->address;
7890 last_barrier = create_fix_barrier (last_added_fix, max_address);
7893 assign_minipool_offsets (last_barrier);
7895 while (ftmp)
7897 if (GET_CODE (ftmp->insn) != BARRIER
7898 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7899 == NULL))
7900 break;
7902 ftmp = ftmp->next;
7905 /* Scan over the fixes we have identified for this pool, fixing them
7906 up and adding the constants to the pool itself. */
7907 for (this_fix = fix; this_fix && ftmp != this_fix;
7908 this_fix = this_fix->next)
7909 if (GET_CODE (this_fix->insn) != BARRIER)
7911 rtx addr
7912 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7913 minipool_vector_label),
7914 this_fix->minipool->offset);
7915 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7918 dump_minipool (last_barrier->insn);
7919 fix = ftmp;
7922 /* From now on we must synthesize any constants that we can't handle
7923 directly. This can happen if the RTL gets split during final
7924 instruction generation. */
7925 after_arm_reorg = 1;
7927 /* Free the minipool memory. */
7928 obstack_free (&minipool_obstack, minipool_startobj);
7931 /* Routines to output assembly language. */
7933 /* If the rtx is a valid FPA immediate constant then return the string of the number.
7934 In this way we can ensure that valid double constants are generated even
7935 when cross-compiling. */
7936 const char *
7937 fp_immediate_constant (rtx x)
7939 REAL_VALUE_TYPE r;
7940 int i;
7942 if (!fp_consts_inited)
7943 init_fp_table ();
7945 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7946 for (i = 0; i < 8; i++)
7947 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7948 return strings_fp[i];
7950 abort ();
7953 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7954 static const char *
7955 fp_const_from_val (REAL_VALUE_TYPE *r)
7957 int i;
7959 if (!fp_consts_inited)
7960 init_fp_table ();
7962 for (i = 0; i < 8; i++)
7963 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7964 return strings_fp[i];
7966 abort ();
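/* Both lookups walk the same eight-entry table built by init_fp_table;
   the FPA immediate set is 0, 1, 2, 3, 4, 5, 0.5 and 10, so, for
   example, a CONST_DOUBLE of 0.5 maps to the string "0.5" while an
   unrepresentable value such as 6.0 aborts. */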
7969 /* Output the operands of a LDM/STM instruction to STREAM.
7970 MASK is the ARM register set mask of which only bits 0-15 are important.
7971 REG is the base register, either the frame pointer or the stack pointer,
7972 INSTR is the possibly suffixed load or store instruction. */
7973 static void
7974 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7976 int i;
7977 int not_first = FALSE;
7979 fputc ('\t', stream);
7980 asm_fprintf (stream, instr, reg);
7981 fputs (", {", stream);
7983 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7984 if (mask & (1 << i))
7986 if (not_first)
7987 fprintf (stream, ", ");
7989 asm_fprintf (stream, "%r", i);
7990 not_first = TRUE;
7993 fprintf (stream, "}\n");
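/* A typical emission, assuming INSTR is "stmfd\t%r!", REG is the stack
   pointer and MASK selects r4, r5 and lr, would be:

	stmfd	sp!, {r4, r5, lr}  */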
7997 /* Output a FLDMX instruction to STREAM.
7998 BASE is the register containing the address.
7999 REG and COUNT specify the register range.
8000 Extra registers may be added to avoid hardware bugs. */
8002 static void
8003 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
8005 int i;
8007 /* Workaround ARM10 VFPr1 bug. */
8008 if (count == 2 && !arm_arch6)
8010 if (reg == 15)
8011 reg--;
8012 count++;
8015 fputc ('\t', stream);
8016 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
8018 for (i = reg; i < reg + count; i++)
8020 if (i > reg)
8021 fputs (", ", stream);
8022 asm_fprintf (stream, "d%d", i);
8024 fputs ("}\n", stream);
8029 /* Output the assembly for a store multiple. */
8031 const char *
8032 vfp_output_fstmx (rtx * operands)
8034 char pattern[100];
8035 int p;
8036 int base;
8037 int i;
8039 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
8040 p = strlen (pattern);
8042 if (GET_CODE (operands[1]) != REG)
8043 abort ();
8045 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8046 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8048 p += sprintf (&pattern[p], ", d%d", base + i);
8050 strcpy (&pattern[p], "}");
8052 output_asm_insn (pattern, operands);
8053 return "";
8057 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8058 number of bytes pushed. */
8060 static int
8061 vfp_emit_fstmx (int base_reg, int count)
8063 rtx par;
8064 rtx dwarf;
8065 rtx tmp, reg;
8066 int i;
8068 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8069 register pairs are stored by a store multiple insn. We avoid this
8070 by pushing an extra pair. */
8071 if (count == 2 && !arm_arch6)
8073 if (base_reg == LAST_VFP_REGNUM - 3)
8074 base_reg -= 2;
8075 count++;
8078 /* ??? The frame layout is implementation defined. We describe
8079 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8080 We really need some way of representing the whole block so that the
8081 unwinder can figure it out at runtime. */
8082 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8083 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8085 reg = gen_rtx_REG (DFmode, base_reg);
8086 base_reg += 2;
8088 XVECEXP (par, 0, 0)
8089 = gen_rtx_SET (VOIDmode,
8090 gen_rtx_MEM (BLKmode,
8091 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
8092 gen_rtx_UNSPEC (BLKmode,
8093 gen_rtvec (1, reg),
8094 UNSPEC_PUSH_MULT));
8096 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8097 gen_rtx_PLUS (SImode, stack_pointer_rtx,
8098 GEN_INT (-(count * 8 + 4))));
8099 RTX_FRAME_RELATED_P (tmp) = 1;
8100 XVECEXP (dwarf, 0, 0) = tmp;
8102 tmp = gen_rtx_SET (VOIDmode,
8103 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8104 reg);
8105 RTX_FRAME_RELATED_P (tmp) = 1;
8106 XVECEXP (dwarf, 0, 1) = tmp;
8108 for (i = 1; i < count; i++)
8110 reg = gen_rtx_REG (DFmode, base_reg);
8111 base_reg += 2;
8112 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8114 tmp = gen_rtx_SET (VOIDmode,
8115 gen_rtx_MEM (DFmode,
8116 gen_rtx_PLUS (SImode,
8117 stack_pointer_rtx,
8118 GEN_INT (i * 8))),
8119 reg);
8120 RTX_FRAME_RELATED_P (tmp) = 1;
8121 XVECEXP (dwarf, 0, i + 1) = tmp;
8124 par = emit_insn (par);
8125 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8126 REG_NOTES (par));
8127 RTX_FRAME_RELATED_P (par) = 1;
8129 return count * 8 + 4;
8133 /* Output a 'call' insn. */
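/* An illustrative sequence (assuming the callee address is in r0 on a
   pre-v4t, non-interworking target):
       mov     lr, pc
       mov     pc, r0
   The first insn captures the return address before the jump.  */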
8134 const char *
8135 output_call (rtx *operands)
8137 if (arm_arch5)
8138 abort (); /* Patterns should call blx <reg> directly. */
8140 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8141 if (REGNO (operands[0]) == LR_REGNUM)
8143 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8144 output_asm_insn ("mov%?\t%0, %|lr", operands);
8147 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8149 if (TARGET_INTERWORK || arm_arch4t)
8150 output_asm_insn ("bx%?\t%0", operands);
8151 else
8152 output_asm_insn ("mov%?\t%|pc, %0", operands);
8154 return "";
8157 /* Output a 'call' insn that is a reference in memory. */
8158 const char *
8159 output_call_mem (rtx *operands)
8161 if (TARGET_INTERWORK && !arm_arch5)
8163 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8164 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8165 output_asm_insn ("bx%?\t%|ip", operands);
8167 else if (regno_use_in (LR_REGNUM, operands[0]))
8169 /* LR is used in the memory address. We load the address in the
8170 first instruction. It's safe to use IP as the target of the
8171 load since the call will kill it anyway. */
8172 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8173 if (arm_arch5)
8174 output_asm_insn ("blx%?\t%|ip", operands);
8175 else
8177 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8178 if (arm_arch4t)
8179 output_asm_insn ("bx%?\t%|ip", operands);
8180 else
8181 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8184 else
8186 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8187 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8190 return "";
8194 /* Output a move from arm registers to an fpa register.
8195 OPERANDS[0] is an fpa register.
8196 OPERANDS[1] is the first register of an arm register pair. */
8197 const char *
8198 output_mov_long_double_fpa_from_arm (rtx *operands)
8200 int arm_reg0 = REGNO (operands[1]);
8201 rtx ops[3];
8203 if (arm_reg0 == IP_REGNUM)
8204 abort ();
8206 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8207 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8208 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8210 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8211 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8213 return "";
8216 /* Output a move from an fpa register to arm registers.
8217 OPERANDS[0] is the first register of an arm register pair.
8218 OPERANDS[1] is an fpa register. */
8219 const char *
8220 output_mov_long_double_arm_from_fpa (rtx *operands)
8222 int arm_reg0 = REGNO (operands[0]);
8223 rtx ops[3];
8225 if (arm_reg0 == IP_REGNUM)
8226 abort ();
8228 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8229 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8230 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8232 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8233 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8234 return "";
8237 /* Output a move from arm registers to arm registers of a long double.
8238 OPERANDS[0] is the destination.
8239 OPERANDS[1] is the source. */
8240 const char *
8241 output_mov_long_double_arm_from_arm (rtx *operands)
8243 /* We have to be careful here because the two might overlap. */
8244 int dest_start = REGNO (operands[0]);
8245 int src_start = REGNO (operands[1]);
8246 rtx ops[2];
8247 int i;
8249 if (dest_start < src_start)
8251 for (i = 0; i < 3; i++)
8253 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8254 ops[1] = gen_rtx_REG (SImode, src_start + i);
8255 output_asm_insn ("mov%?\t%0, %1", ops);
8258 else
8260 for (i = 2; i >= 0; i--)
8262 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8263 ops[1] = gen_rtx_REG (SImode, src_start + i);
8264 output_asm_insn ("mov%?\t%0, %1", ops);
8268 return "";
8272 /* Output a move from arm registers to an fpa register.
8273 OPERANDS[0] is an fpa register.
8274 OPERANDS[1] is the first register of an arm register pair. */
8275 const char *
8276 output_mov_double_fpa_from_arm (rtx *operands)
8278 int arm_reg0 = REGNO (operands[1]);
8279 rtx ops[2];
8281 if (arm_reg0 == IP_REGNUM)
8282 abort ();
8284 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8285 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8286 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8287 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8288 return "";
8291 /* Output a move from an fpa register to arm registers.
8292 OPERANDS[0] is the first register of an arm register pair.
8293 OPERANDS[1] is an fpa register. */
8294 const char *
8295 output_mov_double_arm_from_fpa (rtx *operands)
8297 int arm_reg0 = REGNO (operands[0]);
8298 rtx ops[2];
8300 if (arm_reg0 == IP_REGNUM)
8301 abort ();
8303 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8304 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8305 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8306 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8307 return "";
8310 /* Output a move between double words.
8311 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8312 or MEM<-REG and all MEMs must be offsettable addresses. */
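/* An illustrative case (assuming a DImode load from [r2] into the pair
   r0/r1): the REG<-MEM path below emits "ldmia r2, {r0, r1}".  */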
8313 const char *
8314 output_move_double (rtx *operands)
8316 enum rtx_code code0 = GET_CODE (operands[0]);
8317 enum rtx_code code1 = GET_CODE (operands[1]);
8318 rtx otherops[3];
8320 if (code0 == REG)
8322 int reg0 = REGNO (operands[0]);
8324 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8326 if (code1 == REG)
8328 int reg1 = REGNO (operands[1]);
8329 if (reg1 == IP_REGNUM)
8330 abort ();
8332 /* Ensure the second source is not overwritten. */
8333 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8334 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8335 else
8336 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8338 else if (code1 == CONST_VECTOR)
8340 HOST_WIDE_INT hint = 0;
8342 switch (GET_MODE (operands[1]))
8344 case V2SImode:
8345 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8346 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8347 break;
8349 case V4HImode:
8350 if (BYTES_BIG_ENDIAN)
8352 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8353 hint <<= 16;
8354 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8356 else
8358 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8359 hint <<= 16;
8360 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8363 otherops[1] = GEN_INT (hint);
8364 hint = 0;
8366 if (BYTES_BIG_ENDIAN)
8368 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8369 hint <<= 16;
8370 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8372 else
8374 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8375 hint <<= 16;
8376 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8379 operands[1] = GEN_INT (hint);
8380 break;
8382 case V8QImode:
8383 if (BYTES_BIG_ENDIAN)
8385 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8386 hint <<= 8;
8387 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8388 hint <<= 8;
8389 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8390 hint <<= 8;
8391 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8393 else
8395 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8396 hint <<= 8;
8397 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8398 hint <<= 8;
8399 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8400 hint <<= 8;
8401 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8404 otherops[1] = GEN_INT (hint);
8405 hint = 0;
8407 if (BYTES_BIG_ENDIAN)
8409 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8410 hint <<= 8;
8411 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8412 hint <<= 8;
8413 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8414 hint <<= 8;
8415 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8417 else
8419 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8420 hint <<= 8;
8421 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8422 hint <<= 8;
8423 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8424 hint <<= 8;
8425 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8428 operands[1] = GEN_INT (hint);
8429 break;
8431 default:
8432 abort ();
8434 output_mov_immediate (operands);
8435 output_mov_immediate (otherops);
8437 else if (code1 == CONST_DOUBLE)
8439 if (GET_MODE (operands[1]) == DFmode)
8441 REAL_VALUE_TYPE r;
8442 long l[2];
8444 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8445 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8446 otherops[1] = GEN_INT (l[1]);
8447 operands[1] = GEN_INT (l[0]);
8449 else if (GET_MODE (operands[1]) != VOIDmode)
8450 abort ();
8451 else if (WORDS_BIG_ENDIAN)
8453 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8454 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8456 else
8458 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8459 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8462 output_mov_immediate (operands);
8463 output_mov_immediate (otherops);
8465 else if (code1 == CONST_INT)
8467 #if HOST_BITS_PER_WIDE_INT > 32
8468 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8469 what the upper word is. */
8470 if (WORDS_BIG_ENDIAN)
8472 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8473 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8475 else
8477 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8478 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8480 #else
8481 /* Sign extend the intval into the high-order word. */
8482 if (WORDS_BIG_ENDIAN)
8484 otherops[1] = operands[1];
8485 operands[1] = (INTVAL (operands[1]) < 0
8486 ? constm1_rtx : const0_rtx);
8488 else
8489 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8490 #endif
8491 output_mov_immediate (otherops);
8492 output_mov_immediate (operands);
8494 else if (code1 == MEM)
8496 switch (GET_CODE (XEXP (operands[1], 0)))
8498 case REG:
8499 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8500 break;
8502 case PRE_INC:
8503 if (!TARGET_LDRD)
8504 abort (); /* Should never happen now. */
8505 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
8506 break;
8508 case PRE_DEC:
8509 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8510 break;
8512 case POST_INC:
8513 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8514 break;
8516 case POST_DEC:
8517 if (!TARGET_LDRD)
8518 abort (); /* Should never happen now. */
8519 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
8520 break;
8522 case PRE_MODIFY:
8523 case POST_MODIFY:
8524 otherops[0] = operands[0];
8525 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
8526 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
8528 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
8530 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
8532 /* Registers overlap so split out the increment. */
8533 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8534 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
8536 else
8537 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
8539 else
8541 /* We only allow constant increments, so this is safe. */
8542 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
8544 break;
8546 case LABEL_REF:
8547 case CONST:
8548 output_asm_insn ("adr%?\t%0, %1", operands);
8549 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8550 break;
8552 default:
8553 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8554 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8556 otherops[0] = operands[0];
8557 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8558 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8560 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8562 if (GET_CODE (otherops[2]) == CONST_INT)
8564 switch ((int) INTVAL (otherops[2]))
8566 case -8:
8567 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8568 return "";
8569 case -4:
8570 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8571 return "";
8572 case 4:
8573 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8574 return "";
8577 if (TARGET_LDRD
8578 && (GET_CODE (otherops[2]) == REG
8579 || (GET_CODE (otherops[2]) == CONST_INT
8580 && INTVAL (otherops[2]) > -256
8581 && INTVAL (otherops[2]) < 256)))
8583 if (reg_overlap_mentioned_p (otherops[0],
8584 otherops[2]))
8586 /* Swap base and index registers over to
8587 avoid a conflict. */
8588 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
8589 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
8592 /* If both registers conflict, it will usually
8593 have been fixed by a splitter. */
8594 if (reg_overlap_mentioned_p (otherops[0],
8595 otherops[2]))
8597 output_asm_insn ("add%?\t%1, %1, %2", otherops);
8598 output_asm_insn ("ldr%?d\t%0, [%1]",
8599 otherops);
8600 return "";
8602 else
8604 output_asm_insn ("ldr%?d\t%0, [%1, %2]",
8605 otherops);
8606 return "";
8609 if (GET_CODE (otherops[2]) == CONST_INT)
8611 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8612 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8613 else
8614 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8616 else
8617 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8619 else
8620 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8622 return "ldm%?ia\t%0, %M0";
8624 else
8626 otherops[1] = adjust_address (operands[1], SImode, 4);
8627 /* Take care of overlapping base/data reg. */
8628 if (reg_mentioned_p (operands[0], operands[1]))
8630 output_asm_insn ("ldr%?\t%0, %1", otherops);
8631 output_asm_insn ("ldr%?\t%0, %1", operands);
8633 else
8635 output_asm_insn ("ldr%?\t%0, %1", operands);
8636 output_asm_insn ("ldr%?\t%0, %1", otherops);
8641 else
8642 abort (); /* Constraints should prevent this. */
8644 else if (code0 == MEM && code1 == REG)
8646 if (REGNO (operands[1]) == IP_REGNUM)
8647 abort ();
8649 switch (GET_CODE (XEXP (operands[0], 0)))
8651 case REG:
8652 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8653 break;
8655 case PRE_INC:
8656 if (!TARGET_LDRD)
8657 abort (); /* Should never happen now. */
8658 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
8659 break;
8661 case PRE_DEC:
8662 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8663 break;
8665 case POST_INC:
8666 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8667 break;
8669 case POST_DEC:
8670 if (!TARGET_LDRD)
8671 abort (); /* Should never happen now. */
8672 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
8673 break;
8675 case PRE_MODIFY:
8676 case POST_MODIFY:
8677 otherops[0] = operands[1];
8678 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
8679 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
8681 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
8682 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
8683 else
8684 output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
8685 break;
8687 case PLUS:
8688 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
8689 if (GET_CODE (otherops[2]) == CONST_INT)
8691 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8693 case -8:
8694 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8695 return "";
8697 case -4:
8698 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8699 return "";
8701 case 4:
8702 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8703 return "";
8706 if (TARGET_LDRD
8707 && (GET_CODE (otherops[2]) == REG
8708 || (GET_CODE (otherops[2]) == CONST_INT
8709 && INTVAL (otherops[2]) > -256
8710 && INTVAL (otherops[2]) < 256)))
8712 otherops[0] = operands[1];
8713 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
8714 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
8715 return "";
8717 /* Fall through */
8719 default:
8720 otherops[0] = adjust_address (operands[0], SImode, 4);
8721 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8722 output_asm_insn ("str%?\t%1, %0", operands);
8723 output_asm_insn ("str%?\t%1, %0", otherops);
8726 else
8727 /* Constraints should prevent this. */
8728 abort ();
8730 return "";
8734 /* Output an arbitrary MOV reg, #n.
8735 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
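/* A worked example: n = 0x00ff00ff is not a valid ARM immediate and
   neither is its complement, so it falls through to the ORR path and is
   emitted as two instructions (constants printed in decimal):
       mov     r0, #255          @ 0x000000ff
       orr     r0, r0, #16711680 @ 0x00ff0000  */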
8736 const char *
8737 output_mov_immediate (rtx *operands)
8739 HOST_WIDE_INT n = INTVAL (operands[1]);
8741 /* Try to use one MOV. */
8742 if (const_ok_for_arm (n))
8743 output_asm_insn ("mov%?\t%0, %1", operands);
8745 /* Try to use one MVN. */
8746 else if (const_ok_for_arm (~n))
8748 operands[1] = GEN_INT (~n);
8749 output_asm_insn ("mvn%?\t%0, %1", operands);
8751 else
8753 int n_ones = 0;
8754 int i;
8756 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8757 for (i = 0; i < 32; i++)
8758 if (n & 1 << i)
8759 n_ones++;
8761 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8762 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8763 else
8764 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8767 return "";
8770 /* Output an ADD r, s, #n where n may be too big for one instruction.
8771 If adding zero to one register, output nothing. */
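/* A worked example: adding 257 (not encodable in one instruction) to r1
   with the result in r0 becomes:
       add     r0, r1, #1
       add     r0, r0, #256  */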
8772 const char *
8773 output_add_immediate (rtx *operands)
8775 HOST_WIDE_INT n = INTVAL (operands[2]);
8777 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8779 if (n < 0)
8780 output_multi_immediate (operands,
8781 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8782 -n);
8783 else
8784 output_multi_immediate (operands,
8785 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8789 return "";
8792 /* Output a multiple immediate operation.
8793 OPERANDS is the vector of operands referred to in the output patterns.
8794 INSTR1 is the output pattern to use for the first constant.
8795 INSTR2 is the output pattern to use for subsequent constants.
8796 IMMED_OP is the index of the constant slot in OPERANDS.
8797 N is the constant value. */
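/* The loop below walks the constant two bits at a time and peels off an
   8-bit chunk at each set position; every chunk is an 8-bit value at an
   even bit offset, which is exactly what an ARM data-processing immediate
   (an 8-bit value rotated right by an even amount) can encode.  */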
8798 static const char *
8799 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8800 int immed_op, HOST_WIDE_INT n)
8802 #if HOST_BITS_PER_WIDE_INT > 32
8803 n &= 0xffffffff;
8804 #endif
8806 if (n == 0)
8808 /* Quick and easy output. */
8809 operands[immed_op] = const0_rtx;
8810 output_asm_insn (instr1, operands);
8812 else
8814 int i;
8815 const char * instr = instr1;
8817 /* Note that n is never zero here (which would give no output). */
8818 for (i = 0; i < 32; i += 2)
8820 if (n & (3 << i))
8822 operands[immed_op] = GEN_INT (n & (255 << i));
8823 output_asm_insn (instr, operands);
8824 instr = instr2;
8825 i += 6;
8830 return "";
8833 /* Return the appropriate ARM instruction for the operation code.
8834 The returned result should not be overwritten. OP is the rtx of the
8835 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8836 was shifted. */
8837 const char *
8838 arithmetic_instr (rtx op, int shift_first_arg)
8840 switch (GET_CODE (op))
8842 case PLUS:
8843 return "add";
8845 case MINUS:
8846 return shift_first_arg ? "rsb" : "sub";
8848 case IOR:
8849 return "orr";
8851 case XOR:
8852 return "eor";
8854 case AND:
8855 return "and";
8857 default:
8858 abort ();
8862 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8863 for the operation code. The returned result should not be overwritten.
8864 OP is the rtx code of the shift.
8865 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8866 constant shift amount otherwise. */
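/* An illustrative case: for (mult:SI (reg) (const_int 8)) this returns
   "asl" and sets *AMOUNTP to 3, merging the multiply by a power of two
   into a shift.  */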
8867 static const char *
8868 shift_op (rtx op, HOST_WIDE_INT *amountp)
8870 const char * mnem;
8871 enum rtx_code code = GET_CODE (op);
8873 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8874 *amountp = -1;
8875 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8876 *amountp = INTVAL (XEXP (op, 1));
8877 else
8878 abort ();
8880 switch (code)
8882 case ASHIFT:
8883 mnem = "asl";
8884 break;
8886 case ASHIFTRT:
8887 mnem = "asr";
8888 break;
8890 case LSHIFTRT:
8891 mnem = "lsr";
8892 break;
8894 case ROTATERT:
8895 mnem = "ror";
8896 break;
8898 case MULT:
8899 /* We never have to worry about the amount being other than a
8900 power of 2, since this case can never be reloaded from a reg. */
8901 if (*amountp != -1)
8902 *amountp = int_log2 (*amountp);
8903 else
8904 abort ();
8905 return "asl";
8907 default:
8908 abort ();
8911 if (*amountp != -1)
8913 /* This is not 100% correct, but follows from the desire to merge
8914 multiplication by a power of 2 with the recognizer for a
8915 shift. >=32 is not a valid shift for "asl", so we must try and
8916 output a shift that produces the correct arithmetical result.
8917 Using lsr #32 is identical except for the fact that the carry bit
8918 is not set correctly if we set the flags; but we never use the
8919 carry bit from such an operation, so we can ignore that. */
8920 if (code == ROTATERT)
8921 /* Rotate is just modulo 32. */
8922 *amountp &= 31;
8923 else if (*amountp != (*amountp & 31))
8925 if (code == ASHIFT)
8926 mnem = "lsr";
8927 *amountp = 32;
8930 /* Shifts of 0 are no-ops. */
8931 if (*amountp == 0)
8932 return NULL;
8935 return mnem;
8938 /* Obtain the shift count from POWER, which must be an exact power of two. */
8940 static HOST_WIDE_INT
8941 int_log2 (HOST_WIDE_INT power)
8943 HOST_WIDE_INT shift = 0;
8945 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8947 if (shift > 31)
8948 abort ();
8949 shift++;
8952 return shift;
8955 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8956 /bin/as is horribly restrictive. */
8957 #define MAX_ASCII_LEN 51
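/* An illustrative call: output_ascii_pseudo_op (stream, "hi\n", 3) emits
   .ascii "hi\n" (the newline printed as the two characters \ and n);
   inputs longer than MAX_ASCII_LEN are split across several .ascii
   directives.  */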
8959 void
8960 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8962 int i;
8963 int len_so_far = 0;
8965 fputs ("\t.ascii\t\"", stream);
8967 for (i = 0; i < len; i++)
8969 int c = p[i];
8971 if (len_so_far >= MAX_ASCII_LEN)
8973 fputs ("\"\n\t.ascii\t\"", stream);
8974 len_so_far = 0;
8977 switch (c)
8979 case TARGET_TAB:
8980 fputs ("\\t", stream);
8981 len_so_far += 2;
8982 break;
8984 case TARGET_FF:
8985 fputs ("\\f", stream);
8986 len_so_far += 2;
8987 break;
8989 case TARGET_BS:
8990 fputs ("\\b", stream);
8991 len_so_far += 2;
8992 break;
8994 case TARGET_CR:
8995 fputs ("\\r", stream);
8996 len_so_far += 2;
8997 break;
8999 case TARGET_NEWLINE:
9000 fputs ("\\n", stream);
9001 c = p [i + 1];
9002 if ((c >= ' ' && c <= '~')
9003 || c == TARGET_TAB)
9004 /* This is a good place for a line break. */
9005 len_so_far = MAX_ASCII_LEN;
9006 else
9007 len_so_far += 2;
9008 break;
9010 case '\"':
9011 case '\\':
9012 putc ('\\', stream);
9013 len_so_far++;
9014 /* Drop through. */
9016 default:
9017 if (c >= ' ' && c <= '~')
9019 putc (c, stream);
9020 len_so_far++;
9022 else
9024 fprintf (stream, "\\%03o", c);
9025 len_so_far += 4;
9027 break;
9031 fputs ("\"\n", stream);
9034 /* Compute the register save mask for registers 0 through 12
9035 inclusive. This code is used by arm_compute_save_reg_mask. */
9036 static unsigned long
9037 arm_compute_save_reg0_reg12_mask (void)
9039 unsigned long func_type = arm_current_func_type ();
9040 unsigned int save_reg_mask = 0;
9041 unsigned int reg;
9043 if (IS_INTERRUPT (func_type))
9045 unsigned int max_reg;
9046 /* Interrupt functions must not corrupt any registers,
9047 even call clobbered ones. If this is a leaf function
9048 we can just examine the registers used by the RTL, but
9049 otherwise we have to assume that whatever function is
9050 called might clobber anything, and so we have to save
9051 all the call-clobbered registers as well. */
9052 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
9053 /* FIQ handlers have registers r8 - r12 banked, so
9054 we only need to check r0 - r7.  Normal ISRs only
9055 bank r14 and r15, so we must check up to r12.
9056 r13 is the stack pointer which is always preserved,
9057 so we do not need to consider it here. */
9058 max_reg = 7;
9059 else
9060 max_reg = 12;
9062 for (reg = 0; reg <= max_reg; reg++)
9063 if (regs_ever_live[reg]
9064 || (! current_function_is_leaf && call_used_regs [reg]))
9065 save_reg_mask |= (1 << reg);
9067 else
9069 /* In the normal case we only need to save those registers
9070 which are call saved and which are used by this function. */
9071 for (reg = 0; reg <= 10; reg++)
9072 if (regs_ever_live[reg] && ! call_used_regs [reg])
9073 save_reg_mask |= (1 << reg);
9075 /* Handle the frame pointer as a special case. */
9076 if (! TARGET_APCS_FRAME
9077 && ! frame_pointer_needed
9078 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9079 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9080 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9082 /* If we aren't loading the PIC register,
9083 don't stack it even though it may be live. */
9084 if (flag_pic
9085 && ! TARGET_SINGLE_PIC_BASE
9086 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
9087 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9090 /* Save registers so the exception handler can modify them. */
9091 if (current_function_calls_eh_return)
9093 unsigned int i;
9095 for (i = 0; ; i++)
9097 reg = EH_RETURN_DATA_REGNO (i);
9098 if (reg == INVALID_REGNUM)
9099 break;
9100 save_reg_mask |= 1 << reg;
9104 return save_reg_mask;
9107 /* Compute a bit mask of which registers need to be
9108 saved on the stack for the current function. */
9110 static unsigned long
9111 arm_compute_save_reg_mask (void)
9113 unsigned int save_reg_mask = 0;
9114 unsigned long func_type = arm_current_func_type ();
9116 if (IS_NAKED (func_type))
9117 /* This should never really happen. */
9118 return 0;
9120 /* If we are creating a stack frame, then we must save the frame pointer,
9121 IP (which will hold the old stack pointer), LR and the PC. */
9122 if (frame_pointer_needed)
9123 save_reg_mask |=
9124 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9125 | (1 << IP_REGNUM)
9126 | (1 << LR_REGNUM)
9127 | (1 << PC_REGNUM);
9129 /* Volatile functions do not return, so there
9130 is no need to save any other registers. */
9131 if (IS_VOLATILE (func_type))
9132 return save_reg_mask;
9134 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9136 /* Decide if we need to save the link register.
9137 Interrupt routines have their own banked link register,
9138 so they never need to save it.
9139 Otherwise if we do not use the link register we do not need to save
9140 it. If we are pushing other registers onto the stack however, we
9141 can save an instruction in the epilogue by pushing the link register
9142 now and then popping it back into the PC. This incurs extra memory
9143 accesses though, so we only do it when optimizing for size, and only
9144 if we know that we will not need a fancy return sequence. */
9145 if (regs_ever_live [LR_REGNUM]
9146 || (save_reg_mask
9147 && optimize_size
9148 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9149 && !current_function_calls_eh_return))
9150 save_reg_mask |= 1 << LR_REGNUM;
9152 if (cfun->machine->lr_save_eliminated)
9153 save_reg_mask &= ~ (1 << LR_REGNUM);
9155 if (TARGET_REALLY_IWMMXT
9156 && ((bit_count (save_reg_mask)
9157 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9159 unsigned int reg;
9161 /* The total number of registers that are going to be pushed
9162 onto the stack is odd. We need to ensure that the stack
9163 is 64-bit aligned before we start to save iWMMXt registers,
9164 and also before we start to create locals. (A local variable
9165 might be a double or long long which we will load/store using
9166 an iWMMXt instruction). Therefore we need to push another
9167 ARM register, so that the stack will be 64-bit aligned. We
9168 try to avoid using the arg registers (r0 - r3) as they might be
9169 used to pass values in a tail call. */
9170 for (reg = 4; reg <= 12; reg++)
9171 if ((save_reg_mask & (1 << reg)) == 0)
9172 break;
9174 if (reg <= 12)
9175 save_reg_mask |= (1 << reg);
9176 else
9178 cfun->machine->sibcall_blocked = 1;
9179 save_reg_mask |= (1 << 3);
9183 return save_reg_mask;
9187 /* Compute a bit mask of which registers need to be
9188 saved on the stack for the current function. */
9189 static unsigned long
9190 thumb_compute_save_reg_mask (void)
9192 unsigned long mask;
9193 int reg;
9195 mask = 0;
9196 for (reg = 0; reg < 12; reg ++)
9198 if (regs_ever_live[reg] && !call_used_regs[reg])
9199 mask |= 1 << reg;
9202 if (flag_pic && !TARGET_SINGLE_PIC_BASE)
9203 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9204 if (TARGET_SINGLE_PIC_BASE)
9205 mask &= ~(1 << arm_pic_register);
9207 /* lr will also be pushed if any lo regs are pushed. */
9208 if (mask & 0xff || thumb_force_lr_save ())
9209 mask |= (1 << LR_REGNUM);
9211 /* Make sure we have a low work register if we need one. */
9212 if (((mask & 0xff) == 0 && regs_ever_live[LAST_ARG_REGNUM])
9213 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9214 mask |= 1 << LAST_LO_REGNUM;
9216 return mask;
9220 /* Return the number of bytes required to save VFP registers. */
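/* A worked example (assuming d8-d10 are the only live call-saved VFP
   registers): that is one block of three pairs, so this returns
   3 * 8 + 4 = 28; on a pre-armv6 core a block of exactly two pairs is
   padded to three by the errata workaround below.  */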
9221 static int
9222 arm_get_vfp_saved_size (void)
9224 unsigned int regno;
9225 int count;
9226 int saved;
9228 saved = 0;
9229 /* Space for saved VFP registers. */
9230 if (TARGET_HARD_FLOAT && TARGET_VFP)
9232 count = 0;
9233 for (regno = FIRST_VFP_REGNUM;
9234 regno < LAST_VFP_REGNUM;
9235 regno += 2)
9237 if ((!regs_ever_live[regno] || call_used_regs[regno])
9238 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9240 if (count > 0)
9242 /* Work around the ARM10 VFPr1 bug. */
9243 if (count == 2 && !arm_arch6)
9244 count++;
9245 saved += count * 8 + 4;
9247 count = 0;
9249 else
9250 count++;
9252 if (count > 0)
9254 if (count == 2 && !arm_arch6)
9255 count++;
9256 saved += count * 8 + 4;
9259 return saved;
9263 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9264 everything bar the final return instruction. */
9265 const char *
9266 output_return_instruction (rtx operand, int really_return, int reverse)
9268 char conditional[10];
9269 char instr[100];
9270 int reg;
9271 unsigned long live_regs_mask;
9272 unsigned long func_type;
9273 arm_stack_offsets *offsets;
9275 func_type = arm_current_func_type ();
9277 if (IS_NAKED (func_type))
9278 return "";
9280 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9282 /* If this function was declared non-returning, and we have
9283 found a tail call, then we have to trust that the called
9284 function won't return. */
9285 if (really_return)
9287 rtx ops[2];
9289 /* Otherwise, trap an attempted return by aborting. */
9290 ops[0] = operand;
9291 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9292 : "abort");
9293 assemble_external_libcall (ops[1]);
9294 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9297 return "";
9300 if (current_function_calls_alloca && !really_return)
9301 abort ();
9303 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9305 return_used_this_function = 1;
9307 live_regs_mask = arm_compute_save_reg_mask ();
9309 if (live_regs_mask)
9311 const char * return_reg;
9313 /* If we do not have any special requirements for function exit
9314 (eg interworking, or ISR) then we can load the return address
9315 directly into the PC. Otherwise we must load it into LR. */
9316 if (really_return
9317 && ! TARGET_INTERWORK)
9318 return_reg = reg_names[PC_REGNUM];
9319 else
9320 return_reg = reg_names[LR_REGNUM];
9322 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9324 /* There are three possible reasons for the IP register
9325 being saved. 1) a stack frame was created, in which case
9326 IP contains the old stack pointer, or 2) an ISR routine
9327 corrupted it, or 3) it was saved to align the stack on
9328 iWMMXt. In case 1, restore IP into SP, otherwise just
9329 restore IP. */
9330 if (frame_pointer_needed)
9332 live_regs_mask &= ~ (1 << IP_REGNUM);
9333 live_regs_mask |= (1 << SP_REGNUM);
9335 else
9337 if (! IS_INTERRUPT (func_type)
9338 && ! TARGET_REALLY_IWMMXT)
9339 abort ();
9343 /* On some ARM architectures it is faster to use LDR rather than
9344 LDM to load a single register. On other architectures, the
9345 cost is the same. In 26 bit mode, or for exception handlers,
9346 we have to use LDM to load the PC so that the CPSR is also
9347 restored. */
9348 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9350 if (live_regs_mask == (unsigned int)(1 << reg))
9351 break;
9353 if (reg <= LAST_ARM_REGNUM
9354 && (reg != LR_REGNUM
9355 || ! really_return
9356 || ! IS_INTERRUPT (func_type)))
9358 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9359 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9361 else
9363 char *p;
9364 int first = 1;
9366 /* Generate the load multiple instruction to restore the
9367 registers. Note we can get here, even if
9368 frame_pointer_needed is true, but only if sp already
9369 points to the base of the saved core registers. */
9370 if (live_regs_mask & (1 << SP_REGNUM))
9372 unsigned HOST_WIDE_INT stack_adjust;
9374 offsets = arm_get_frame_offsets ();
9375 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9376 if (stack_adjust != 0 && stack_adjust != 4)
9377 abort ();
9379 if (stack_adjust && arm_arch5)
9380 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9381 else
9383 /* If we can't use ldmib (SA110 bug), then try to pop r3
9384 instead. */
9385 if (stack_adjust)
9386 live_regs_mask |= 1 << 3;
9387 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9390 else
9391 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9393 p = instr + strlen (instr);
9395 for (reg = 0; reg <= SP_REGNUM; reg++)
9396 if (live_regs_mask & (1 << reg))
9398 int l = strlen (reg_names[reg]);
9400 if (first)
9401 first = 0;
9402 else
9404 memcpy (p, ", ", 2);
9405 p += 2;
9408 memcpy (p, "%|", 2);
9409 memcpy (p + 2, reg_names[reg], l);
9410 p += l + 2;
9413 if (live_regs_mask & (1 << LR_REGNUM))
9415 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9416 /* If returning from an interrupt, restore the CPSR. */
9417 if (IS_INTERRUPT (func_type))
9418 strcat (p, "^");
9420 else
9421 strcpy (p, "}");
9424 output_asm_insn (instr, & operand);
9426 /* See if we need to generate an extra instruction to
9427 perform the actual function return. */
9428 if (really_return
9429 && func_type != ARM_FT_INTERWORKED
9430 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9432 /* The return has already been handled
9433 by loading the LR into the PC. */
9434 really_return = 0;
9438 if (really_return)
9440 switch ((int) ARM_FUNC_TYPE (func_type))
9442 case ARM_FT_ISR:
9443 case ARM_FT_FIQ:
9444 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9445 break;
9447 case ARM_FT_INTERWORKED:
9448 sprintf (instr, "bx%s\t%%|lr", conditional);
9449 break;
9451 case ARM_FT_EXCEPTION:
9452 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9453 break;
9455 default:
9456 /* Use bx if it's available. */
9457 if (arm_arch5 || arm_arch4t)
9458 sprintf (instr, "bx%s\t%%|lr", conditional);
9459 else
9460 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
9461 break;
9464 output_asm_insn (instr, & operand);
9467 return "";
9470 /* Write the function name into the code section, directly preceding
9471 the function prologue.
9473 Code will be output similar to this:
9475 .ascii "arm_poke_function_name", 0
9476 .align
9478 .word 0xff000000 + (t1 - t0)
9479 arm_poke_function_name
9480 mov ip, sp
9481 stmfd sp!, {fp, ip, lr, pc}
9482 sub fp, ip, #4
9484 When performing a stack backtrace, code can inspect the value
9485 of 'pc' stored at 'fp' + 0. If the trace function then looks
9486 at location pc - 12 and the top 8 bits are set, then we know
9487 that there is a function name embedded immediately preceding this
9488 location, whose length is given by (pc[-3] & ~0xff000000).
9490 We assume that pc is declared as a pointer to an unsigned long.
9492 It is of no benefit to output the function name if we are assembling
9493 a leaf function. These function types will not contain a stack
9494 backtrace structure, therefore it is not possible to determine the
9495 function name. */
9496 void
9497 arm_poke_function_name (FILE *stream, const char *name)
9499 unsigned long alignlength;
9500 unsigned long length;
9501 rtx x;
9503 length = strlen (name) + 1;
9504 alignlength = ROUND_UP_WORD (length);
9506 ASM_OUTPUT_ASCII (stream, name, length);
9507 ASM_OUTPUT_ALIGN (stream, 2);
9508 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9509 assemble_aligned_integer (UNITS_PER_WORD, x);
9512 /* Place some comments into the assembler stream
9513 describing the current function. */
9514 static void
9515 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9517 unsigned long func_type;
9519 if (!TARGET_ARM)
9521 thumb_output_function_prologue (f, frame_size);
9522 return;
9525 /* Sanity check. */
9526 if (arm_ccfsm_state || arm_target_insn)
9527 abort ();
9529 func_type = arm_current_func_type ();
9531 switch ((int) ARM_FUNC_TYPE (func_type))
9533 default:
9534 case ARM_FT_NORMAL:
9535 break;
9536 case ARM_FT_INTERWORKED:
9537 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9538 break;
9539 case ARM_FT_ISR:
9540 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9541 break;
9542 case ARM_FT_FIQ:
9543 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9544 break;
9545 case ARM_FT_EXCEPTION:
9546 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9547 break;
9550 if (IS_NAKED (func_type))
9551 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9553 if (IS_VOLATILE (func_type))
9554 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9556 if (IS_NESTED (func_type))
9557 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9559 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9560 current_function_args_size,
9561 current_function_pretend_args_size, frame_size);
9563 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9564 frame_pointer_needed,
9565 cfun->machine->uses_anonymous_args);
9567 if (cfun->machine->lr_save_eliminated)
9568 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9570 if (current_function_calls_eh_return)
9571 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
9573 #ifdef AOF_ASSEMBLER
9574 if (flag_pic)
9575 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9576 #endif
9578 return_used_this_function = 0;
9581 const char *
9582 arm_output_epilogue (rtx sibling)
9584 int reg;
9585 unsigned long saved_regs_mask;
9586 unsigned long func_type;
9587 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9588 frame that is $fp + 4 for a non-variadic function. */
9589 int floats_offset = 0;
9590 rtx operands[3];
9591 FILE * f = asm_out_file;
9592 unsigned int lrm_count = 0;
9593 int really_return = (sibling == NULL);
9594 int start_reg;
9595 arm_stack_offsets *offsets;
9597 /* If we have already generated the return instruction
9598 then it is futile to generate anything else. */
9599 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9600 return "";
9602 func_type = arm_current_func_type ();
9604 if (IS_NAKED (func_type))
9605 /* Naked functions don't have epilogues. */
9606 return "";
9608 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9610 rtx op;
9612 /* A volatile function should never return. Call abort. */
9613 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9614 assemble_external_libcall (op);
9615 output_asm_insn ("bl\t%a0", &op);
9617 return "";
9620 if (current_function_calls_eh_return
9621 && ! really_return)
9622 /* If we are throwing an exception, then we really must
9623 be doing a return, so we can't tail-call. */
9624 abort ();
9626 offsets = arm_get_frame_offsets ();
9627 saved_regs_mask = arm_compute_save_reg_mask ();
9629 if (TARGET_IWMMXT)
9630 lrm_count = bit_count (saved_regs_mask);
9632 floats_offset = offsets->saved_args;
9633 /* Compute how far away the floats will be. */
9634 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9635 if (saved_regs_mask & (1 << reg))
9636 floats_offset += 4;
9638 if (frame_pointer_needed)
9640 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9641 int vfp_offset = offsets->frame;
9643 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9645 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9646 if (regs_ever_live[reg] && !call_used_regs[reg])
9648 floats_offset += 12;
9649 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9650 reg, FP_REGNUM, floats_offset - vfp_offset);
9653 else
9655 start_reg = LAST_FPA_REGNUM;
9657 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9659 if (regs_ever_live[reg] && !call_used_regs[reg])
9661 floats_offset += 12;
9663 /* We can't unstack more than four registers at once. */
9664 if (start_reg - reg == 3)
9666 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9667 reg, FP_REGNUM, floats_offset - vfp_offset);
9668 start_reg = reg - 1;
9671 else
9673 if (reg != start_reg)
9674 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9675 reg + 1, start_reg - reg,
9676 FP_REGNUM, floats_offset - vfp_offset);
9677 start_reg = reg - 1;
9681 /* Just in case the last register checked also needs unstacking. */
9682 if (reg != start_reg)
9683 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9684 reg + 1, start_reg - reg,
9685 FP_REGNUM, floats_offset - vfp_offset);
9688 if (TARGET_HARD_FLOAT && TARGET_VFP)
9690 int saved_size;
9692 /* The fldmx insn does not have base+offset addressing modes,
9693 so we use IP to hold the address. */
9694 saved_size = arm_get_vfp_saved_size ();
9696 if (saved_size > 0)
9698 floats_offset += saved_size;
9699 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9700 FP_REGNUM, floats_offset - vfp_offset);
9702 start_reg = FIRST_VFP_REGNUM;
9703 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9705 if ((!regs_ever_live[reg] || call_used_regs[reg])
9706 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9708 if (start_reg != reg)
9709 arm_output_fldmx (f, IP_REGNUM,
9710 (start_reg - FIRST_VFP_REGNUM) / 2,
9711 (reg - start_reg) / 2);
9712 start_reg = reg + 2;
9715 if (start_reg != reg)
9716 arm_output_fldmx (f, IP_REGNUM,
9717 (start_reg - FIRST_VFP_REGNUM) / 2,
9718 (reg - start_reg) / 2);
9721 if (TARGET_IWMMXT)
9723 /* The frame pointer is guaranteed to be non-double-word aligned.
9724 This is because it is set to (old_stack_pointer - 4) and the
9725 old_stack_pointer was double word aligned. Thus the offset to
9726 the iWMMXt registers to be loaded must also be non-double-word
9727 sized, so that the resultant address *is* double-word aligned.
9728 We can ignore floats_offset since that was already included in
9729 the live_regs_mask. */
9730 lrm_count += (lrm_count % 2 ? 2 : 1);
9732 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9733 if (regs_ever_live[reg] && !call_used_regs[reg])
9735 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9736 reg, FP_REGNUM, lrm_count * 4);
9737 lrm_count += 2;
9741 /* saved_regs_mask should contain the IP, which at the time of stack
9742 frame generation actually contains the old stack pointer. So a
9743 quick way to unwind the stack is just to pop the IP register directly
9744 into the stack pointer. */
9745 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9746 abort ();
9747 saved_regs_mask &= ~ (1 << IP_REGNUM);
9748 saved_regs_mask |= (1 << SP_REGNUM);
9750 /* There are two registers left in saved_regs_mask - LR and PC. We
9751 only need to restore the LR register (the return address), but to
9752 save time we can load it directly into the PC, unless we need a
9753 special function exit sequence, or we are not really returning. */
9754 if (really_return
9755 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9756 && !current_function_calls_eh_return)
9757 /* Delete the LR from the register mask, so that the LR on
9758 the stack is loaded into the PC in the register mask. */
9759 saved_regs_mask &= ~ (1 << LR_REGNUM);
9760 else
9761 saved_regs_mask &= ~ (1 << PC_REGNUM);
9763 /* We must use SP as the base register, because SP is one of the
9764 registers being restored. If an interrupt or page fault
9765 happens in the ldm instruction, the SP might or might not
9766 have been restored. That would be bad, as then SP will no
9767 longer indicate the safe area of stack, and we can get stack
9768 corruption. Using SP as the base register means that it will
9769 be reset correctly to the original value, should an interrupt
9770 occur. If the stack pointer already points at the right
9771 place, then omit the subtraction. */
9772 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9773 || current_function_calls_alloca)
9774 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9775 4 * bit_count (saved_regs_mask));
9776 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9778 if (IS_INTERRUPT (func_type))
9779 /* Interrupt handlers will have pushed the
9780 IP onto the stack, so restore it now. */
9781 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9783 else
9785 /* Restore stack pointer if necessary. */
9786 if (offsets->outgoing_args != offsets->saved_regs)
9788 operands[0] = operands[1] = stack_pointer_rtx;
9789 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9790 output_add_immediate (operands);
9793 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9795 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9796 if (regs_ever_live[reg] && !call_used_regs[reg])
9797 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9798 reg, SP_REGNUM);
9800 else
9802 start_reg = FIRST_FPA_REGNUM;
9804 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9806 if (regs_ever_live[reg] && !call_used_regs[reg])
9808 if (reg - start_reg == 3)
9810 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9811 start_reg, SP_REGNUM);
9812 start_reg = reg + 1;
9815 else
9817 if (reg != start_reg)
9818 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9819 start_reg, reg - start_reg,
9820 SP_REGNUM);
9822 start_reg = reg + 1;
9826 /* Just in case the last register checked also needs unstacking. */
9827 if (reg != start_reg)
9828 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9829 start_reg, reg - start_reg, SP_REGNUM);
9832 if (TARGET_HARD_FLOAT && TARGET_VFP)
9834 start_reg = FIRST_VFP_REGNUM;
9835 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9837 if ((!regs_ever_live[reg] || call_used_regs[reg])
9838 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9840 if (start_reg != reg)
9841 arm_output_fldmx (f, SP_REGNUM,
9842 (start_reg - FIRST_VFP_REGNUM) / 2,
9843 (reg - start_reg) / 2);
9844 start_reg = reg + 2;
9847 if (start_reg != reg)
9848 arm_output_fldmx (f, SP_REGNUM,
9849 (start_reg - FIRST_VFP_REGNUM) / 2,
9850 (reg - start_reg) / 2);
9852 if (TARGET_IWMMXT)
9853 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9854 if (regs_ever_live[reg] && !call_used_regs[reg])
9855 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9857 /* If we can, restore the LR into the PC. */
9858 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9859 && really_return
9860 && current_function_pretend_args_size == 0
9861 && saved_regs_mask & (1 << LR_REGNUM)
9862 && !current_function_calls_eh_return)
9864 saved_regs_mask &= ~ (1 << LR_REGNUM);
9865 saved_regs_mask |= (1 << PC_REGNUM);
9868 /* Load the registers off the stack. If we only have one register
9869 to load use the LDR instruction - it is faster. */
9870 if (saved_regs_mask == (1 << LR_REGNUM))
9872 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9874 else if (saved_regs_mask)
9876 if (saved_regs_mask & (1 << SP_REGNUM))
9877 /* Note - write back to the stack register is not enabled
9878 (ie "ldmfd sp!..."). We know that the stack pointer is
9879 in the list of registers and if we add writeback the
9880 instruction becomes UNPREDICTABLE. */
9881 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9882 else
9883 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9886 if (current_function_pretend_args_size)
9888 /* Unwind the pre-pushed regs. */
9889 operands[0] = operands[1] = stack_pointer_rtx;
9890 operands[2] = GEN_INT (current_function_pretend_args_size);
9891 output_add_immediate (operands);
9895 /* We may have already restored PC directly from the stack. */
9896 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
9897 return "";
9899 /* Stack adjustment for exception handler. */
9900 if (current_function_calls_eh_return)
9901 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
9902 ARM_EH_STACKADJ_REGNUM);
9904 /* Generate the return instruction. */
9905 switch ((int) ARM_FUNC_TYPE (func_type))
9907 case ARM_FT_ISR:
9908 case ARM_FT_FIQ:
9909 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9910 break;
9912 case ARM_FT_EXCEPTION:
9913 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9914 break;
9916 case ARM_FT_INTERWORKED:
9917 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9918 break;
9920 default:
9921 if (arm_arch5 || arm_arch4t)
9922 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9923 else
9924 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9925 break;
9928 return "";
9931 static void
9932 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9933 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9935 arm_stack_offsets *offsets;
9937 if (TARGET_THUMB)
9939 /* ??? Probably not safe to set this here, since it assumes that a
9940 function will be emitted as assembly immediately after we generate
9941 RTL for it. This does not happen for inline functions. */
9942 return_used_this_function = 0;
9944 else
9946 /* We need to take into account any stack-frame rounding. */
9947 offsets = arm_get_frame_offsets ();
9949 if (use_return_insn (FALSE, NULL)
9950 && return_used_this_function
9951 && offsets->saved_regs != offsets->outgoing_args
9952 && !frame_pointer_needed)
9953 abort ();
9955 /* Reset the ARM-specific per-function variables. */
9956 after_arm_reorg = 0;
9960 /* Generate and emit an insn that we will recognize as a push_multi.
9961 Unfortunately, since this insn does not reflect very well the actual
9962 semantics of the operation, we need to annotate the insn for the benefit
9963 of DWARF2 frame unwind information. */
9964 static rtx
9965 emit_multi_reg_push (int mask)
9967 int num_regs = 0;
9968 int num_dwarf_regs;
9969 int i, j;
9970 rtx par;
9971 rtx dwarf;
9972 int dwarf_par_index;
9973 rtx tmp, reg;
9975 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9976 if (mask & (1 << i))
9977 num_regs++;
9979 if (num_regs == 0 || num_regs > 16)
9980 abort ();
9982 /* We don't record the PC in the dwarf frame information. */
9983 num_dwarf_regs = num_regs;
9984 if (mask & (1 << PC_REGNUM))
9985 num_dwarf_regs--;
9987 /* For the body of the insn we are going to generate an UNSPEC in
9988 parallel with several USEs. This allows the insn to be recognized
9989 by the push_multi pattern in the arm.md file. The insn looks
9990 something like this:
9992 (parallel [
9993 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9994 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9995 (use (reg:SI 11 fp))
9996 (use (reg:SI 12 ip))
9997 (use (reg:SI 14 lr))
9998 (use (reg:SI 15 pc))
9999 ])
10001 For the frame note however, we try to be more explicit and actually
10002 show each register being stored into the stack frame, plus a (single)
10003 decrement of the stack pointer. We do it this way in order to be
10004 friendly to the stack unwinding code, which only wants to see a single
10005 stack decrement per instruction. The RTL we generate for the note looks
10006 something like this:
10008 (sequence [
10009 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
10010 (set (mem:SI (reg:SI sp)) (reg:SI r4))
10011 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
10012 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
10013 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
10014 ])
10016 This sequence is used both by the code to support stack unwinding for
10017 exception handlers and the code to generate dwarf2 frame debugging. */
10019 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10020 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10021 dwarf_par_index = 1;
10023 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10025 if (mask & (1 << i))
10027 reg = gen_rtx_REG (SImode, i);
10029 XVECEXP (par, 0, 0)
10030 = gen_rtx_SET (VOIDmode,
10031 gen_rtx_MEM (BLKmode,
10032 gen_rtx_PRE_DEC (BLKmode,
10033 stack_pointer_rtx)),
10034 gen_rtx_UNSPEC (BLKmode,
10035 gen_rtvec (1, reg),
10036 UNSPEC_PUSH_MULT));
10038 if (i != PC_REGNUM)
10040 tmp = gen_rtx_SET (VOIDmode,
10041 gen_rtx_MEM (SImode, stack_pointer_rtx),
10042 reg);
10043 RTX_FRAME_RELATED_P (tmp) = 1;
10044 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10045 dwarf_par_index++;
10048 break;
10052 for (j = 1, i++; j < num_regs; i++)
10054 if (mask & (1 << i))
10056 reg = gen_rtx_REG (SImode, i);
10058 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10060 if (i != PC_REGNUM)
10062 tmp = gen_rtx_SET (VOIDmode,
10063 gen_rtx_MEM (SImode,
10064 plus_constant (stack_pointer_rtx,
10065 4 * j)),
10066 reg);
10067 RTX_FRAME_RELATED_P (tmp) = 1;
10068 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10071 j++;
10075 par = emit_insn (par);
10077 tmp = gen_rtx_SET (SImode,
10078 stack_pointer_rtx,
10079 gen_rtx_PLUS (SImode,
10080 stack_pointer_rtx,
10081 GEN_INT (-4 * num_regs)));
10082 RTX_FRAME_RELATED_P (tmp) = 1;
10083 XVECEXP (dwarf, 0, 0) = tmp;
10085 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10086 REG_NOTES (par));
10087 return par;
10090 static rtx
10091 emit_sfm (int base_reg, int count)
10093 rtx par;
10094 rtx dwarf;
10095 rtx tmp, reg;
10096 int i;
10098 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10099 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10101 reg = gen_rtx_REG (XFmode, base_reg++);
10103 XVECEXP (par, 0, 0)
10104 = gen_rtx_SET (VOIDmode,
10105 gen_rtx_MEM (BLKmode,
10106 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
10107 gen_rtx_UNSPEC (BLKmode,
10108 gen_rtvec (1, reg),
10109 UNSPEC_PUSH_MULT));
10110 tmp = gen_rtx_SET (VOIDmode,
10111 gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
10112 RTX_FRAME_RELATED_P (tmp) = 1;
10113 XVECEXP (dwarf, 0, 1) = tmp;
10115 for (i = 1; i < count; i++)
10117 reg = gen_rtx_REG (XFmode, base_reg++);
10118 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10120 tmp = gen_rtx_SET (VOIDmode,
10121 gen_rtx_MEM (XFmode,
10122 plus_constant (stack_pointer_rtx,
10123 i * 12)),
10124 reg);
10125 RTX_FRAME_RELATED_P (tmp) = 1;
10126 XVECEXP (dwarf, 0, i + 1) = tmp;
10129 tmp = gen_rtx_SET (VOIDmode,
10130 stack_pointer_rtx,
10131 gen_rtx_PLUS (SImode,
10132 stack_pointer_rtx,
10133 GEN_INT (-12 * count)));
10134 RTX_FRAME_RELATED_P (tmp) = 1;
10135 XVECEXP (dwarf, 0, 0) = tmp;
10137 par = emit_insn (par);
10138 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10139 REG_NOTES (par));
10140 return par;
10144 /* Return true if the current function needs to save/restore LR. */
10146 static bool
10147 thumb_force_lr_save (void)
10149 return !cfun->machine->lr_save_eliminated
10150 && (!leaf_function_p ()
10151 || thumb_far_jump_used_p ()
10152 || regs_ever_live [LR_REGNUM]);
10156 /* Compute the distance from register FROM to register TO.
10157 These can be the arg pointer (26), the soft frame pointer (25),
10158 the stack pointer (13) or the hard frame pointer (11).
10159 In thumb mode r7 is used as the soft frame pointer, if needed.
10160 Typical stack layout looks like this:
10162 old stack pointer -> | |
10163 ----
10164 | | \
10165 | | saved arguments for
10166 | | vararg functions
10167 | | /
10169 hard FP & arg pointer -> | | \
10170 | | stack
10171 | | frame
10172 | | /
10174 | | \
10175 | | call saved
10176 | | registers
10177 soft frame pointer -> | | /
10179 | | \
10180 | | local
10181 | | variables
10182 | | /
10184 | | \
10185 | | outgoing
10186 | | arguments
10187 current stack pointer -> | | /
10190 For a given function some or all of these stack components
10191 may not be needed, giving rise to the possibility of
10192 eliminating some of the registers.
10194 The values returned by this function must reflect the behavior
10195 of arm_expand_prologue() and arm_compute_save_reg_mask().
10197 The sign of the number returned reflects the direction of stack
10198 growth, so the values are positive for all eliminations except
10199 from the soft frame pointer to the hard frame pointer.
10201 SFP may point just inside the local variables block to ensure correct
10202 alignment. */
10205 /* Calculate stack offsets. These are used to calculate register elimination
10206 offsets and in prologue/epilogue code. */
10208 static arm_stack_offsets *
10209 arm_get_frame_offsets (void)
10211 struct arm_stack_offsets *offsets;
10212 unsigned long func_type;
10213 int leaf;
10214 int saved;
10215 HOST_WIDE_INT frame_size;
10217 offsets = &cfun->machine->stack_offsets;
10219 /* We need to know if we are a leaf function. Unfortunately, it
10220 is possible to be called after start_sequence has been called,
10221 which causes get_insns to return the insns for the sequence,
10222 not the function, which will cause leaf_function_p to return
10223 the incorrect result.
10225 However, we only need to know about leaf functions once reload has completed, and the
10226 frame size cannot be changed after that time, so we can safely
10227 use the cached value. */
10229 if (reload_completed)
10230 return offsets;
10232 /* Initially this is the size of the local variables. It will be translated
10233 into an offset once we have determined the size of preceding data. */
10234 frame_size = ROUND_UP_WORD (get_frame_size ());
10236 leaf = leaf_function_p ();
10238 /* Space for variadic functions. */
10239 offsets->saved_args = current_function_pretend_args_size;
10241 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10243 if (TARGET_ARM)
10245 unsigned int regno;
10247 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10249 /* We know that SP will be doubleword aligned on entry, and we must
10250 preserve that condition at any subroutine call. We also require the
10251 soft frame pointer to be doubleword aligned. */
10253 if (TARGET_REALLY_IWMMXT)
10255 /* Check for the call-saved iWMMXt registers. */
10256 for (regno = FIRST_IWMMXT_REGNUM;
10257 regno <= LAST_IWMMXT_REGNUM;
10258 regno++)
10259 if (regs_ever_live [regno] && ! call_used_regs [regno])
10260 saved += 8;
10263 func_type = arm_current_func_type ();
10264 if (! IS_VOLATILE (func_type))
10266 /* Space for saved FPA registers. */
10267 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10268 if (regs_ever_live[regno] && ! call_used_regs[regno])
10269 saved += 12;
10271 /* Space for saved VFP registers. */
10272 if (TARGET_HARD_FLOAT && TARGET_VFP)
10273 saved += arm_get_vfp_saved_size ();
10276 else /* TARGET_THUMB */
10278 saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
10279 if (TARGET_BACKTRACE)
10280 saved += 16;
10283 /* Saved registers include the stack frame. */
10284 offsets->saved_regs = offsets->saved_args + saved;
10285 offsets->soft_frame = offsets->saved_regs;
10286 /* A leaf function does not need any stack alignment if it has nothing
10287 on the stack. */
10288 if (leaf && frame_size == 0)
10290 offsets->outgoing_args = offsets->soft_frame;
10291 return offsets;
10294 /* Ensure SFP has the correct alignment. */
10295 if (ARM_DOUBLEWORD_ALIGN
10296 && (offsets->soft_frame & 7))
10297 offsets->soft_frame += 4;
10299 offsets->outgoing_args = offsets->soft_frame + frame_size
10300 + current_function_outgoing_args_size;
10302 if (ARM_DOUBLEWORD_ALIGN)
10304 /* Ensure SP remains doubleword aligned. */
10305 if (offsets->outgoing_args & 7)
10306 offsets->outgoing_args += 4;
10307 if (offsets->outgoing_args & 7)
10308 abort ();
10311 return offsets;
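/* A worked example of the layout above (assuming an ARM-mode non-leaf
   function with no pretend args, a frame pointer, {r4, r5, fp, lr}
   in the save mask, 8 bytes of locals and no outgoing arguments):

     offsets->saved_args    = 0
     offsets->frame         = 4        (frame pointer slot)
     offsets->saved_regs    = 16       (four registers pushed)
     offsets->soft_frame    = 16       (already doubleword aligned)
     offsets->outgoing_args = 24       (16 + 8 bytes of locals)  */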
10315 /* Calculate the relative offsets for the different stack pointers. Positive
10316 offsets are in the direction of stack growth. */
10318 HOST_WIDE_INT
10319 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10321 arm_stack_offsets *offsets;
10323 offsets = arm_get_frame_offsets ();
10325 /* OK, now we have enough information to compute the distances.
10326 There must be an entry in these switch tables for each pair
10327 of registers in ELIMINABLE_REGS, even if some of the entries
10328 seem to be redundant or useless. */
10329 switch (from)
10331 case ARG_POINTER_REGNUM:
10332 switch (to)
10334 case THUMB_HARD_FRAME_POINTER_REGNUM:
10335 return 0;
10337 case FRAME_POINTER_REGNUM:
10338 /* This is the reverse of the soft frame pointer
10339 to hard frame pointer elimination below. */
10340 return offsets->soft_frame - offsets->saved_args;
10342 case ARM_HARD_FRAME_POINTER_REGNUM:
10343 /* If there is no stack frame then the hard
10344 frame pointer and the arg pointer coincide. */
10345 if (offsets->frame == offsets->saved_regs)
10346 return 0;
10347 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10348 return (frame_pointer_needed
10349 && cfun->static_chain_decl != NULL
10350 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10352 case STACK_POINTER_REGNUM:
10353 /* If nothing has been pushed on the stack at all
10354 then this will return -4. This *is* correct! */
10355 return offsets->outgoing_args - (offsets->saved_args + 4);
10357 default:
10358 abort ();
10360 break;
10362 case FRAME_POINTER_REGNUM:
10363 switch (to)
10365 case THUMB_HARD_FRAME_POINTER_REGNUM:
10366 return 0;
10368 case ARM_HARD_FRAME_POINTER_REGNUM:
10369 /* The hard frame pointer points to the top entry in the
10370 stack frame. The soft frame pointer to the bottom entry
10371 in the stack frame. If there is no stack frame at all,
10372 then they are identical. */
10374 return offsets->frame - offsets->soft_frame;
10376 case STACK_POINTER_REGNUM:
10377 return offsets->outgoing_args - offsets->soft_frame;
10379 default:
10380 abort ();
10382 break;
10384 default:
10385 /* You cannot eliminate from the stack pointer.
10386 In theory you could eliminate from the hard frame
10387 pointer to the stack pointer, but this will never
10388 happen, since if a stack frame is not needed the
10389 hard frame pointer will never be used. */
10390 abort ();
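/* Continuing the worked example after arm_get_frame_offsets: the
   elimination ARG_POINTER_REGNUM -> STACK_POINTER_REGNUM gives
   24 - (0 + 4) = 20, and FRAME_POINTER_REGNUM -> STACK_POINTER_REGNUM
   gives 24 - 16 = 8, both positive since that is the direction of
   stack growth.  */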
10395 /* Generate the prologue instructions for entry into an ARM function. */
10396 void
10397 arm_expand_prologue (void)
10399 int reg;
10400 rtx amount;
10401 rtx insn;
10402 rtx ip_rtx;
10403 unsigned long live_regs_mask;
10404 unsigned long func_type;
10405 int fp_offset = 0;
10406 int saved_pretend_args = 0;
10407 int saved_regs = 0;
10408 unsigned int args_to_push;
10409 arm_stack_offsets *offsets;
10411 func_type = arm_current_func_type ();
10413 /* Naked functions don't have prologues. */
10414 if (IS_NAKED (func_type))
10415 return;
10417 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10418 args_to_push = current_function_pretend_args_size;
10420 /* Compute which registers we will have to save onto the stack. */
10421 live_regs_mask = arm_compute_save_reg_mask ();
10423 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10425 if (frame_pointer_needed)
10427 if (IS_INTERRUPT (func_type))
10429 /* Interrupt functions must not corrupt any registers.
10430 Creating a frame pointer however, corrupts the IP
10431 register, so we must push it first. */
10432 insn = emit_multi_reg_push (1 << IP_REGNUM);
10434 /* Do not set RTX_FRAME_RELATED_P on this insn.
10435 The dwarf stack unwinding code only wants to see one
10436 stack decrement per function, and this is not it. If
10437 this instruction is labeled as being part of the frame
10438 creation sequence then dwarf2out_frame_debug_expr will
10439 abort when it encounters the assignment of IP to FP
10440 later on, since the use of SP here establishes SP as
10441 the CFA register and not IP.
10443 Anyway this instruction is not really part of the stack
10444 frame creation although it is part of the prologue. */
10446 else if (IS_NESTED (func_type))
10448 /* The static chain register is the same as the IP register,
10449 which is used as a scratch register during stack frame creation.
10450 To get around this we need to find somewhere to store IP
10451 whilst the frame is being created. We try the following
10452 places in order:
10454 1. The last argument register.
10455 2. A slot on the stack above the frame. (This only
10456 works if the function is not a varargs function).
10457 3. Register r3, after pushing the argument registers
10458 onto the stack.
10460 Note - we only need to tell the dwarf2 backend about the SP
10461 adjustment in the second variant; the static chain register
10462 doesn't need to be unwound, as it doesn't contain a value
10463 inherited from the caller. */
10465 if (regs_ever_live[3] == 0)
10467 insn = gen_rtx_REG (SImode, 3);
10468 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10469 insn = emit_insn (insn);
10471 else if (args_to_push == 0)
10473 rtx dwarf;
10474 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10475 insn = gen_rtx_MEM (SImode, insn);
10476 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10477 insn = emit_insn (insn);
10479 fp_offset = 4;
10481 /* Just tell the dwarf backend that we adjusted SP. */
10482 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10483 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10484 GEN_INT (-fp_offset)));
10485 RTX_FRAME_RELATED_P (insn) = 1;
10486 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10487 dwarf, REG_NOTES (insn));
10489 else
10491 /* Store the args on the stack. */
10492 if (cfun->machine->uses_anonymous_args)
10493 insn = emit_multi_reg_push
10494 ((0xf0 >> (args_to_push / 4)) & 0xf);
10495 else
10496 insn = emit_insn
10497 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10498 GEN_INT (- args_to_push)));
10500 RTX_FRAME_RELATED_P (insn) = 1;
10502 saved_pretend_args = 1;
10503 fp_offset = args_to_push;
10504 args_to_push = 0;
10506 /* Now reuse r3 to preserve IP. */
10507 insn = gen_rtx_REG (SImode, 3);
10508 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10509 (void) emit_insn (insn);
10513 if (fp_offset)
10515 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10516 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10518 else
10519 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10521 insn = emit_insn (insn);
10522 RTX_FRAME_RELATED_P (insn) = 1;
10525 if (args_to_push)
10527 /* Push the argument registers, or reserve space for them. */
10528 if (cfun->machine->uses_anonymous_args)
10529 insn = emit_multi_reg_push
10530 ((0xf0 >> (args_to_push / 4)) & 0xf);
10531 else
10532 insn = emit_insn
10533 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10534 GEN_INT (- args_to_push)));
10535 RTX_FRAME_RELATED_P (insn) = 1;
10538 /* If this is an interrupt service routine, and the link register
10539 is going to be pushed, and we are not creating a stack frame,
10540 (which would involve an extra push of IP and a pop in the epilogue)
10541 subtracting four from LR now will mean that the function return
10542 can be done with a single instruction. */
10543 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10544 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10545 && ! frame_pointer_needed)
10546 emit_insn (gen_rtx_SET (SImode,
10547 gen_rtx_REG (SImode, LR_REGNUM),
10548 gen_rtx_PLUS (SImode,
10549 gen_rtx_REG (SImode, LR_REGNUM),
10550 GEN_INT (-4))));
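/* With LR pre-adjusted like this, an epilogue that would otherwise
   need the ISR-style "subs pc, lr, #4" return can use a plain
   "movs pc, lr" instead -- a sketch of the intent here; the actual
   return sequence is produced by the epilogue code elsewhere in
   this file.  */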
10552 if (live_regs_mask)
10554 insn = emit_multi_reg_push (live_regs_mask);
10555 saved_regs += bit_count (live_regs_mask) * 4;
10556 RTX_FRAME_RELATED_P (insn) = 1;
10559 if (TARGET_IWMMXT)
10560 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10561 if (regs_ever_live[reg] && ! call_used_regs [reg])
10563 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10564 insn = gen_rtx_MEM (V2SImode, insn);
10565 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10566 gen_rtx_REG (V2SImode, reg)));
10567 RTX_FRAME_RELATED_P (insn) = 1;
10568 saved_regs += 8;
10571 if (! IS_VOLATILE (func_type))
10573 int start_reg;
10575 /* Save any floating point call-saved registers used by this
10576 function. */
10577 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10579 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10580 if (regs_ever_live[reg] && !call_used_regs[reg])
10582 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10583 insn = gen_rtx_MEM (XFmode, insn);
10584 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10585 gen_rtx_REG (XFmode, reg)));
10586 RTX_FRAME_RELATED_P (insn) = 1;
10587 saved_regs += 12;
10590 else
10592 start_reg = LAST_FPA_REGNUM;
10594 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10596 if (regs_ever_live[reg] && !call_used_regs[reg])
10598 if (start_reg - reg == 3)
10600 insn = emit_sfm (reg, 4);
10601 RTX_FRAME_RELATED_P (insn) = 1;
10602 saved_regs += 48;
10603 start_reg = reg - 1;
10606 else
10608 if (start_reg != reg)
10610 insn = emit_sfm (reg + 1, start_reg - reg);
10611 RTX_FRAME_RELATED_P (insn) = 1;
10612 saved_regs += (start_reg - reg) * 12;
10614 start_reg = reg - 1;
10618 if (start_reg != reg)
10620 insn = emit_sfm (reg + 1, start_reg - reg);
10621 saved_regs += (start_reg - reg) * 12;
10622 RTX_FRAME_RELATED_P (insn) = 1;
10625 if (TARGET_HARD_FLOAT && TARGET_VFP)
10627 start_reg = FIRST_VFP_REGNUM;
10629 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10631 if ((!regs_ever_live[reg] || call_used_regs[reg])
10632 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10634 if (start_reg != reg)
10635 saved_regs += vfp_emit_fstmx (start_reg,
10636 (reg - start_reg) / 2);
10637 start_reg = reg + 2;
10640 if (start_reg != reg)
10641 saved_regs += vfp_emit_fstmx (start_reg,
10642 (reg - start_reg) / 2);
10646 if (frame_pointer_needed)
10648 /* Create the new frame pointer. */
10649 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10650 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10651 RTX_FRAME_RELATED_P (insn) = 1;
10653 if (IS_NESTED (func_type))
10655 /* Recover the static chain register. */
10656 if (regs_ever_live [3] == 0
10657 || saved_pretend_args)
10658 insn = gen_rtx_REG (SImode, 3);
10659 else /* if (current_function_pretend_args_size == 0) */
10661 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10662 GEN_INT (4));
10663 insn = gen_rtx_MEM (SImode, insn);
10666 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10667 /* Add a USE to stop propagate_one_insn() from barfing. */
10668 emit_insn (gen_prologue_use (ip_rtx));
10672 offsets = arm_get_frame_offsets ();
10673 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10675 /* This add can produce multiple insns for a large constant, so we
10676 need to get tricky. */
10677 rtx last = get_last_insn ();
10679 amount = GEN_INT (offsets->saved_args + saved_regs
10680 - offsets->outgoing_args);
10682 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10683 amount));
10686 last = last ? NEXT_INSN (last) : get_insns ();
10687 RTX_FRAME_RELATED_P (last) = 1;
10689 while (last != insn);
10691 /* If the frame pointer is needed, emit a special barrier that
10692 will prevent the scheduler from moving stores to the frame
10693 before the stack adjustment. */
10694 if (frame_pointer_needed)
10695 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10696 hard_frame_pointer_rtx));
10700 if (flag_pic)
10701 arm_load_pic_register ();
10703 /* If we are profiling, make sure no instructions are scheduled before
10704 the call to mcount. Similarly if the user has requested no
10705 scheduling in the prolog. */
10706 if (current_function_profile || TARGET_NO_SCHED_PRO)
10707 emit_insn (gen_blockage ());
10709 /* If the link register is being kept alive, with the return address in it,
10710 then make sure that it does not get reused by the ce2 pass. */
10711 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10713 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10714 cfun->machine->lr_save_eliminated = 1;
10718 /* If CODE is 'd', then the X is a condition operand and the instruction
10719 should only be executed if the condition is true.
10720 If CODE is 'D', then the X is a condition operand and the instruction
10721 should only be executed if the condition is false: however, if the mode
10722 of the comparison is CCFPEmode, then always execute the instruction -- we
10723 do this because in these circumstances !GE does not necessarily imply LT;
10724 in these cases the instruction pattern will take care to make sure that
10725 an instruction containing %d will follow, thereby undoing the effects of
10726 doing this instruction unconditionally.
10727 If CODE is 'N' then X is a floating point operand that must be negated
10728 before output.
10729 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10730 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10731 void
10732 arm_print_operand (FILE *stream, rtx x, int code)
10734 switch (code)
10736 case '@':
10737 fputs (ASM_COMMENT_START, stream);
10738 return;
10740 case '_':
10741 fputs (user_label_prefix, stream);
10742 return;
10744 case '|':
10745 fputs (REGISTER_PREFIX, stream);
10746 return;
10748 case '?':
10749 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10751 if (TARGET_THUMB || current_insn_predicate != NULL)
10752 abort ();
10754 fputs (arm_condition_codes[arm_current_cc], stream);
10756 else if (current_insn_predicate)
10758 enum arm_cond_code code;
10760 if (TARGET_THUMB)
10761 abort ();
10763 code = get_arm_condition_code (current_insn_predicate);
10764 fputs (arm_condition_codes[code], stream);
10766 return;
10768 case 'N':
10770 REAL_VALUE_TYPE r;
10771 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10772 r = REAL_VALUE_NEGATE (r);
10773 fprintf (stream, "%s", fp_const_from_val (&r));
10775 return;
10777 case 'B':
10778 if (GET_CODE (x) == CONST_INT)
10780 HOST_WIDE_INT val;
10781 val = ARM_SIGN_EXTEND (~INTVAL (x));
10782 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10784 else
10786 putc ('~', stream);
10787 output_addr_const (stream, x);
10789 return;
10791 case 'i':
10792 fprintf (stream, "%s", arithmetic_instr (x, 1));
10793 return;
10795 /* Truncate Cirrus shift counts. */
10796 case 's':
10797 if (GET_CODE (x) == CONST_INT)
10799 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10800 return;
10802 arm_print_operand (stream, x, 0);
10803 return;
10805 case 'I':
10806 fprintf (stream, "%s", arithmetic_instr (x, 0));
10807 return;
10809 case 'S':
10811 HOST_WIDE_INT val;
10812 const char * shift = shift_op (x, &val);
10814 if (shift)
10816 fprintf (stream, ", %s ", shift_op (x, &val));
10817 if (val == -1)
10818 arm_print_operand (stream, XEXP (x, 1), 0);
10819 else
10820 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10823 return;
10825 /* An explanation of the 'Q', 'R' and 'H' register operands:
10827 In a pair of registers containing a DI or DF value the 'Q'
10828 operand returns the register number of the register containing
10829 the least significant part of the value. The 'R' operand returns
10830 the register number of the register containing the most
10831 significant part of the value.
10833 The 'H' operand returns the higher of the two register numbers.
10834 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10835 same as the 'Q' operand, since the most significant part of the
10836 value is held in the lower numbered register. The reverse is true
10837 on systems where WORDS_BIG_ENDIAN is false.
10839 The purpose of these operands is to distinguish between cases
10840 where the endian-ness of the values is important (for example
10841 when they are added together), and cases where the endian-ness
10842 is irrelevant, but the order of register operations is important.
10843 For example when loading a value from memory into a register
10844 pair, the endian-ness does not matter. Provided that the value
10845 from the lower memory address is put into the lower numbered
10846 register, and the value from the higher address is put into the
10847 higher numbered register, the load will work regardless of whether
10848 the value being loaded is big-wordian or little-wordian. The
10849 order of the two register loads can matter however, if the address
10850 of the memory location is actually held in one of the registers
10851 being overwritten by the load. */
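/* For example, with a DImode value in the pair r0/r1 and
   WORDS_BIG_ENDIAN false: %Q prints r0 (least significant part),
   %R prints r1 (most significant part) and %H prints r1. With
   WORDS_BIG_ENDIAN true, %Q and %H both print r1 while %R prints
   r0, as per the register-number offsets used below.  */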
10852 case 'Q':
10853 if (REGNO (x) > LAST_ARM_REGNUM)
10854 abort ();
10855 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10856 return;
10858 case 'R':
10859 if (REGNO (x) > LAST_ARM_REGNUM)
10860 abort ();
10861 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10862 return;
10864 case 'H':
10865 if (REGNO (x) > LAST_ARM_REGNUM)
10866 abort ();
10867 asm_fprintf (stream, "%r", REGNO (x) + 1);
10868 return;
10870 case 'm':
10871 asm_fprintf (stream, "%r",
10872 GET_CODE (XEXP (x, 0)) == REG
10873 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10874 return;
10876 case 'M':
10877 asm_fprintf (stream, "{%r-%r}",
10878 REGNO (x),
10879 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10880 return;
10882 case 'd':
10883 /* CONST_TRUE_RTX means always -- that's the default. */
10884 if (x == const_true_rtx)
10885 return;
10887 fputs (arm_condition_codes[get_arm_condition_code (x)],
10888 stream);
10889 return;
10891 case 'D':
10892 /* CONST_TRUE_RTX means not always -- ie never. We shouldn't ever
10893 want to do that. */
10894 if (x == const_true_rtx)
10895 abort ();
10897 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10898 (get_arm_condition_code (x))],
10899 stream);
10900 return;
10902 /* Cirrus registers can be accessed in a variety of ways:
10903 single floating point (f)
10904 double floating point (d)
10905 32bit integer (fx)
10906 64bit integer (dx). */
10907 case 'W': /* Cirrus register in F mode. */
10908 case 'X': /* Cirrus register in D mode. */
10909 case 'Y': /* Cirrus register in FX mode. */
10910 case 'Z': /* Cirrus register in DX mode. */
10911 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10912 abort ();
10914 fprintf (stream, "mv%s%s",
10915 code == 'W' ? "f"
10916 : code == 'X' ? "d"
10917 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10919 return;
10921 /* Print cirrus register in the mode specified by the register's mode. */
10922 case 'V':
10924 int mode = GET_MODE (x);
10926 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10927 abort ();
10929 fprintf (stream, "mv%s%s",
10930 mode == DFmode ? "d"
10931 : mode == SImode ? "fx"
10932 : mode == DImode ? "dx"
10933 : "f", reg_names[REGNO (x)] + 2);
10935 return;
10938 case 'U':
10939 if (GET_CODE (x) != REG
10940 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10941 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10942 /* Bad value for wCG register number. */
10943 abort ();
10944 else
10945 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10946 return;
10948 /* Print an iWMMXt control register name. */
10949 case 'w':
10950 if (GET_CODE (x) != CONST_INT
10951 || INTVAL (x) < 0
10952 || INTVAL (x) >= 16)
10953 /* Bad value for wC register number. */
10954 abort ();
10955 else
10957 static const char * wc_reg_names [16] =
10959 "wCID", "wCon", "wCSSF", "wCASF",
10960 "wC4", "wC5", "wC6", "wC7",
10961 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10962 "wC12", "wC13", "wC14", "wC15"
10965 fputs (wc_reg_names [INTVAL (x)], stream);
10967 return;
10969 /* Print a VFP double precision register name. */
10970 case 'P':
10972 int mode = GET_MODE (x);
10973 int num;
10975 if (mode != DImode && mode != DFmode)
10976 abort ();
10978 if (GET_CODE (x) != REG
10979 || !IS_VFP_REGNUM (REGNO (x)))
10980 abort ();
10982 num = REGNO(x) - FIRST_VFP_REGNUM;
10983 if (num & 1)
10984 abort ();
10986 fprintf (stream, "d%d", num >> 1);
10988 return;
10990 default:
10991 if (x == 0)
10992 abort ();
10994 if (GET_CODE (x) == REG)
10995 asm_fprintf (stream, "%r", REGNO (x));
10996 else if (GET_CODE (x) == MEM)
10998 output_memory_reference_mode = GET_MODE (x);
10999 output_address (XEXP (x, 0));
11001 else if (GET_CODE (x) == CONST_DOUBLE)
11002 fprintf (stream, "#%s", fp_immediate_constant (x));
11003 else if (GET_CODE (x) == NEG)
11004 abort (); /* This should never happen now. */
11005 else
11007 fputc ('#', stream);
11008 output_addr_const (stream, x);
11013 #ifndef AOF_ASSEMBLER
11014 /* Target hook for assembling integer objects. The ARM version needs to
11015 handle word-sized values specially. */
11016 static bool
11017 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
11019 if (size == UNITS_PER_WORD && aligned_p)
11021 fputs ("\t.word\t", asm_out_file);
11022 output_addr_const (asm_out_file, x);
11024 /* Mark symbols as position independent. We only do this in the
11025 .text segment, not in the .data segment. */
11026 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
11027 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
11029 if (GET_CODE (x) == SYMBOL_REF
11030 && (CONSTANT_POOL_ADDRESS_P (x)
11031 || SYMBOL_REF_LOCAL_P (x)))
11032 fputs ("(GOTOFF)", asm_out_file);
11033 else if (GET_CODE (x) == LABEL_REF)
11034 fputs ("(GOTOFF)", asm_out_file);
11035 else
11036 fputs ("(GOT)", asm_out_file);
11038 fputc ('\n', asm_out_file);
11039 return true;
11042 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
11044 int i, units;
11046 if (GET_CODE (x) != CONST_VECTOR)
11047 abort ();
11049 units = CONST_VECTOR_NUNITS (x);
11051 switch (GET_MODE (x))
11053 case V2SImode: size = 4; break;
11054 case V4HImode: size = 2; break;
11055 case V8QImode: size = 1; break;
11056 default:
11057 abort ();
11060 for (i = 0; i < units; i++)
11062 rtx elt;
11064 elt = CONST_VECTOR_ELT (x, i);
11065 assemble_integer
11066 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
11069 return true;
11072 return default_assemble_integer (x, size, aligned_p);
11074 #endif
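/* Sample output for the word-sized case above, assuming a PIC
   constant table in .text: a local symbol is emitted as

       .word   some_local_symbol(GOTOFF)

   while a non-local one becomes

       .word   some_global_symbol(GOT)

   (the symbol names here are purely illustrative).  */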
11076 /* A finite state machine takes care of noticing whether or not instructions
11077 can be conditionally executed, and thus decrease execution time and code
11078 size by deleting branch instructions. The fsm is controlled by
11079 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
11081 /* The state of the fsm controlling condition codes are:
11082 0: normal, do nothing special
11083 1: make ASM_OUTPUT_OPCODE not output this instruction
11084 2: make ASM_OUTPUT_OPCODE not output this instruction
11085 3: make instructions conditional
11086 4: make instructions conditional
11088 State transitions (state->state by whom under condition):
11089 0 -> 1 final_prescan_insn if the `target' is a label
11090 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
11091 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
11092 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
11093 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
11094 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
11095 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
11096 (the target insn is arm_target_insn).
11098 If the jump clobbers the conditions then we use states 2 and 4.
11100 A similar thing can be done with conditional return insns.
11102 XXX In case the `target' is an unconditional branch, this conditionalising
11103 of the instructions always reduces code size, but not always execution
11104 time. But then, I want to reduce the code size to somewhere near what
11105 /bin/cc produces. */
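/* A small hypothetical example of the 0 -> 1 -> 3 path: given

       cmp     r0, #0
       beq     .L1
       add     r1, r1, #1
   .L1:

   the fsm suppresses the beq and conditionalizes the skipped insn,
   so the emitted code becomes

       cmp     r0, #0
       addne   r1, r1, #1

   deleting the branch as described above.  */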
11107 /* Returns the index of the ARM condition code string in
11108 `arm_condition_codes'. COMPARISON should be an rtx like
11109 `(eq (...) (...))'. */
11110 static enum arm_cond_code
11111 get_arm_condition_code (rtx comparison)
11113 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
11114 int code;
11115 enum rtx_code comp_code = GET_CODE (comparison);
11117 if (GET_MODE_CLASS (mode) != MODE_CC)
11118 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
11119 XEXP (comparison, 1));
11121 switch (mode)
11123 case CC_DNEmode: code = ARM_NE; goto dominance;
11124 case CC_DEQmode: code = ARM_EQ; goto dominance;
11125 case CC_DGEmode: code = ARM_GE; goto dominance;
11126 case CC_DGTmode: code = ARM_GT; goto dominance;
11127 case CC_DLEmode: code = ARM_LE; goto dominance;
11128 case CC_DLTmode: code = ARM_LT; goto dominance;
11129 case CC_DGEUmode: code = ARM_CS; goto dominance;
11130 case CC_DGTUmode: code = ARM_HI; goto dominance;
11131 case CC_DLEUmode: code = ARM_LS; goto dominance;
11132 case CC_DLTUmode: code = ARM_CC;
11134 dominance:
11135 if (comp_code != EQ && comp_code != NE)
11136 abort ();
11138 if (comp_code == EQ)
11139 return ARM_INVERSE_CONDITION_CODE (code);
11140 return code;
11142 case CC_NOOVmode:
11143 switch (comp_code)
11145 case NE: return ARM_NE;
11146 case EQ: return ARM_EQ;
11147 case GE: return ARM_PL;
11148 case LT: return ARM_MI;
11149 default: abort ();
11152 case CC_Zmode:
11153 switch (comp_code)
11155 case NE: return ARM_NE;
11156 case EQ: return ARM_EQ;
11157 default: abort ();
11160 case CC_Nmode:
11161 switch (comp_code)
11163 case NE: return ARM_MI;
11164 case EQ: return ARM_PL;
11165 default: abort ();
11168 case CCFPEmode:
11169 case CCFPmode:
11170 /* These encodings assume that AC=1 in the FPA system control
11171 byte. This allows us to handle all cases except UNEQ and
11172 LTGT. */
11173 switch (comp_code)
11175 case GE: return ARM_GE;
11176 case GT: return ARM_GT;
11177 case LE: return ARM_LS;
11178 case LT: return ARM_MI;
11179 case NE: return ARM_NE;
11180 case EQ: return ARM_EQ;
11181 case ORDERED: return ARM_VC;
11182 case UNORDERED: return ARM_VS;
11183 case UNLT: return ARM_LT;
11184 case UNLE: return ARM_LE;
11185 case UNGT: return ARM_HI;
11186 case UNGE: return ARM_PL;
11187 /* UNEQ and LTGT do not have a representation. */
11188 case UNEQ: /* Fall through. */
11189 case LTGT: /* Fall through. */
11190 default: abort ();
11193 case CC_SWPmode:
11194 switch (comp_code)
11196 case NE: return ARM_NE;
11197 case EQ: return ARM_EQ;
11198 case GE: return ARM_LE;
11199 case GT: return ARM_LT;
11200 case LE: return ARM_GE;
11201 case LT: return ARM_GT;
11202 case GEU: return ARM_LS;
11203 case GTU: return ARM_CC;
11204 case LEU: return ARM_CS;
11205 case LTU: return ARM_HI;
11206 default: abort ();
11209 case CC_Cmode:
11210 switch (comp_code)
11212 case LTU: return ARM_CS;
11213 case GEU: return ARM_CC;
11214 default: abort ();
11217 case CCmode:
11218 switch (comp_code)
11220 case NE: return ARM_NE;
11221 case EQ: return ARM_EQ;
11222 case GE: return ARM_GE;
11223 case GT: return ARM_GT;
11224 case LE: return ARM_LE;
11225 case LT: return ARM_LT;
11226 case GEU: return ARM_CS;
11227 case GTU: return ARM_HI;
11228 case LEU: return ARM_LS;
11229 case LTU: return ARM_CC;
11230 default: abort ();
11233 default: abort ();
11236 abort ();
11239 void
11240 arm_final_prescan_insn (rtx insn)
11242 /* BODY will hold the body of INSN. */
11243 rtx body = PATTERN (insn);
11245 /* This will be 1 if trying to repeat the trick, and things need to be
11246 reversed if it appears to fail. */
11247 int reverse = 0;
11249 /* JUMP_CLOBBERS being one implies that the condition codes are clobbered
11250 if a branch is taken, even if the rtl suggests otherwise. It also
11251 means that we have to grub around within the jump expression to find
11252 out what the conditions are when the jump isn't taken. */
11253 int jump_clobbers = 0;
11255 /* If we start with a return insn, we only succeed if we find another one. */
11256 int seeking_return = 0;
11258 /* START_INSN will hold the insn from where we start looking. This is the
11259 first insn after the following code_label if REVERSE is true. */
11260 rtx start_insn = insn;
11262 /* If in state 4, check if the target branch is reached, in order to
11263 change back to state 0. */
11264 if (arm_ccfsm_state == 4)
11266 if (insn == arm_target_insn)
11268 arm_target_insn = NULL;
11269 arm_ccfsm_state = 0;
11271 return;
11274 /* If in state 3, it is possible to repeat the trick, if this insn is an
11275 unconditional branch to a label, and immediately following this branch
11276 is the previous target label which is only used once, and the label this
11277 branch jumps to is not too far off. */
11278 if (arm_ccfsm_state == 3)
11280 if (simplejump_p (insn))
11282 start_insn = next_nonnote_insn (start_insn);
11283 if (GET_CODE (start_insn) == BARRIER)
11285 /* XXX Isn't this always a barrier? */
11286 start_insn = next_nonnote_insn (start_insn);
11288 if (GET_CODE (start_insn) == CODE_LABEL
11289 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11290 && LABEL_NUSES (start_insn) == 1)
11291 reverse = TRUE;
11292 else
11293 return;
11295 else if (GET_CODE (body) == RETURN)
11297 start_insn = next_nonnote_insn (start_insn);
11298 if (GET_CODE (start_insn) == BARRIER)
11299 start_insn = next_nonnote_insn (start_insn);
11300 if (GET_CODE (start_insn) == CODE_LABEL
11301 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11302 && LABEL_NUSES (start_insn) == 1)
11304 reverse = TRUE;
11305 seeking_return = 1;
11307 else
11308 return;
11310 else
11311 return;
11314 if (arm_ccfsm_state != 0 && !reverse)
11315 abort ();
11316 if (GET_CODE (insn) != JUMP_INSN)
11317 return;
11319 /* This jump might be paralleled with a clobber of the condition codes;
11320 the jump should always come first. */
11321 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11322 body = XVECEXP (body, 0, 0);
11324 if (reverse
11325 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11326 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11328 int insns_skipped;
11329 int fail = FALSE, succeed = FALSE;
11330 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11331 int then_not_else = TRUE;
11332 rtx this_insn = start_insn, label = 0;
11334 /* If the jump cannot be done with one instruction, we cannot
11335 conditionally execute the instruction in the inverse case. */
11336 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11338 jump_clobbers = 1;
11339 return;
11342 /* Register the insn jumped to. */
11343 if (reverse)
11345 if (!seeking_return)
11346 label = XEXP (SET_SRC (body), 0);
11348 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11349 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11350 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11352 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11353 then_not_else = FALSE;
11355 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11356 seeking_return = 1;
11357 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11359 seeking_return = 1;
11360 then_not_else = FALSE;
11362 else
11363 abort ();
11365 /* See how many insns this branch skips, and what kind of insns. If all
11366 insns are okay, and the label or unconditional branch to the same
11367 label is not too far away, succeed. */
11368 for (insns_skipped = 0;
11369 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11371 rtx scanbody;
11373 this_insn = next_nonnote_insn (this_insn);
11374 if (!this_insn)
11375 break;
11377 switch (GET_CODE (this_insn))
11379 case CODE_LABEL:
11380 /* Succeed if it is the target label, otherwise fail since
11381 control falls in from somewhere else. */
11382 if (this_insn == label)
11384 if (jump_clobbers)
11386 arm_ccfsm_state = 2;
11387 this_insn = next_nonnote_insn (this_insn);
11389 else
11390 arm_ccfsm_state = 1;
11391 succeed = TRUE;
11393 else
11394 fail = TRUE;
11395 break;
11397 case BARRIER:
11398 /* Succeed if the following insn is the target label.
11399 Otherwise fail.
11400 If return insns are used then the last insn in a function
11401 will be a barrier. */
11402 this_insn = next_nonnote_insn (this_insn);
11403 if (this_insn && this_insn == label)
11405 if (jump_clobbers)
11407 arm_ccfsm_state = 2;
11408 this_insn = next_nonnote_insn (this_insn);
11410 else
11411 arm_ccfsm_state = 1;
11412 succeed = TRUE;
11414 else
11415 fail = TRUE;
11416 break;
11418 case CALL_INSN:
11419 /* The AAPCS says that conditional calls should not be
11420 used since they make interworking inefficient (the
11421 linker can't transform BL<cond> into BLX). That's
11422 only a problem if the machine has BLX. */
11423 if (arm_arch5)
11425 fail = TRUE;
11426 break;
11429 /* Succeed if the following insn is the target label, or
11430 if the following two insns are a barrier and the
11431 target label. */
11432 this_insn = next_nonnote_insn (this_insn);
11433 if (this_insn && GET_CODE (this_insn) == BARRIER)
11434 this_insn = next_nonnote_insn (this_insn);
11436 if (this_insn && this_insn == label
11437 && insns_skipped < max_insns_skipped)
11439 if (jump_clobbers)
11441 arm_ccfsm_state = 2;
11442 this_insn = next_nonnote_insn (this_insn);
11444 else
11445 arm_ccfsm_state = 1;
11446 succeed = TRUE;
11448 else
11449 fail = TRUE;
11450 break;
11452 case JUMP_INSN:
11453 /* If this is an unconditional branch to the same label, succeed.
11454 If it is to another label, do nothing. If it is conditional,
11455 fail. */
11456 /* XXX Probably, the tests for SET and the PC are
11457 unnecessary. */
11459 scanbody = PATTERN (this_insn);
11460 if (GET_CODE (scanbody) == SET
11461 && GET_CODE (SET_DEST (scanbody)) == PC)
11463 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11464 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11466 arm_ccfsm_state = 2;
11467 succeed = TRUE;
11469 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11470 fail = TRUE;
11472 /* Fail if a conditional return is undesirable (eg on a
11473 StrongARM), but still allow this if optimizing for size. */
11474 else if (GET_CODE (scanbody) == RETURN
11475 && !use_return_insn (TRUE, NULL)
11476 && !optimize_size)
11477 fail = TRUE;
11478 else if (GET_CODE (scanbody) == RETURN
11479 && seeking_return)
11481 arm_ccfsm_state = 2;
11482 succeed = TRUE;
11484 else if (GET_CODE (scanbody) == PARALLEL)
11486 switch (get_attr_conds (this_insn))
11488 case CONDS_NOCOND:
11489 break;
11490 default:
11491 fail = TRUE;
11492 break;
11495 else
11496 fail = TRUE; /* Unrecognized jump (eg epilogue). */
11498 break;
11500 case INSN:
11501 /* Instructions using or affecting the condition codes make it
11502 fail. */
11503 scanbody = PATTERN (this_insn);
11504 if (!(GET_CODE (scanbody) == SET
11505 || GET_CODE (scanbody) == PARALLEL)
11506 || get_attr_conds (this_insn) != CONDS_NOCOND)
11507 fail = TRUE;
11509 /* A conditional Cirrus instruction must be followed by
11510 a non-Cirrus instruction. However, since we
11511 conditionalize instructions in this function and by
11512 the time we get here we can't add instructions
11513 (nops), because shorten_branches() has already been
11514 called, we will disable conditionalizing Cirrus
11515 instructions to be safe. */
11516 if (GET_CODE (scanbody) != USE
11517 && GET_CODE (scanbody) != CLOBBER
11518 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11519 fail = TRUE;
11520 break;
11522 default:
11523 break;
11526 if (succeed)
11528 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11529 arm_target_label = CODE_LABEL_NUMBER (label);
11530 else if (seeking_return || arm_ccfsm_state == 2)
11532 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11534 this_insn = next_nonnote_insn (this_insn);
11535 if (this_insn && (GET_CODE (this_insn) == BARRIER
11536 || GET_CODE (this_insn) == CODE_LABEL))
11537 abort ();
11539 if (!this_insn)
11541 /* Oh, dear! We ran off the end... give up. */
11542 recog (PATTERN (insn), insn, NULL);
11543 arm_ccfsm_state = 0;
11544 arm_target_insn = NULL;
11545 return;
11547 arm_target_insn = this_insn;
11549 else
11550 abort ();
11551 if (jump_clobbers)
11553 if (reverse)
11554 abort ();
11555 arm_current_cc =
11556 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11557 0), 0), 1));
11558 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11559 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11560 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11561 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11563 else
11565 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11566 what it was. */
11567 if (!reverse)
11568 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11569 0));
11572 if (reverse || then_not_else)
11573 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11576 /* Restore recog_data (getting the attributes of other insns can
11577 destroy this array, but final.c assumes that it remains intact
11578 across this call; since the insn has been recognized already we
11579 call recog direct). */
11580 recog (PATTERN (insn), insn, NULL);
11584 /* Returns true if REGNO is a valid register
11585 for holding a quantity of type MODE. */
11586 int
11587 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11589 if (GET_MODE_CLASS (mode) == MODE_CC)
11590 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11592 if (TARGET_THUMB)
11593 /* For the Thumb we only allow values bigger than SImode in
11594 registers 0 - 6, so that there is always a second low
11595 register available to hold the upper part of the value.
11596 We probably ought to ensure that the register is the
11597 start of an even numbered register pair. */
11598 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11600 if (IS_CIRRUS_REGNUM (regno))
11601 /* We have outlawed SI values in Cirrus registers because they
11602 reside in the lower 32 bits, but SF values reside in the
11603 upper 32 bits. This causes gcc all sorts of grief. We can't
11604 even split the registers into pairs because Cirrus SI values
11605 get sign extended to 64bits-- aldyh. */
11606 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11608 if (IS_VFP_REGNUM (regno))
11610 if (mode == SFmode || mode == SImode)
11611 return TRUE;
11613 /* DFmode values are only valid in even register pairs. */
11614 if (mode == DFmode)
11615 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11616 return FALSE;
11619 if (IS_IWMMXT_GR_REGNUM (regno))
11620 return mode == SImode;
11622 if (IS_IWMMXT_REGNUM (regno))
11623 return VALID_IWMMXT_REG_MODE (mode);
11625 /* We allow any value to be stored in the general registers.
11626 Restrict doubleword quantities to even register pairs so that we can
11627 use ldrd. */
11628 if (regno <= LAST_ARM_REGNUM)
11629 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11631 if ( regno == FRAME_POINTER_REGNUM
11632 || regno == ARG_POINTER_REGNUM)
11633 /* We only allow integers in the fake hard registers. */
11634 return GET_MODE_CLASS (mode) == MODE_INT;
11636 /* The only registers left are the FPA registers
11637 which we only allow to hold FP values. */
11638 return GET_MODE_CLASS (mode) == MODE_FLOAT
11639 && regno >= FIRST_FPA_REGNUM
11640 && regno <= LAST_FPA_REGNUM;
11643 enum reg_class
11644 arm_regno_class (int regno)
11646 if (TARGET_THUMB)
11648 if (regno == STACK_POINTER_REGNUM)
11649 return STACK_REG;
11650 if (regno == CC_REGNUM)
11651 return CC_REG;
11652 if (regno < 8)
11653 return LO_REGS;
11654 return HI_REGS;
11657 if ( regno <= LAST_ARM_REGNUM
11658 || regno == FRAME_POINTER_REGNUM
11659 || regno == ARG_POINTER_REGNUM)
11660 return GENERAL_REGS;
11662 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11663 return NO_REGS;
11665 if (IS_CIRRUS_REGNUM (regno))
11666 return CIRRUS_REGS;
11668 if (IS_VFP_REGNUM (regno))
11669 return VFP_REGS;
11671 if (IS_IWMMXT_REGNUM (regno))
11672 return IWMMXT_REGS;
11674 if (IS_IWMMXT_GR_REGNUM (regno))
11675 return IWMMXT_GR_REGS;
11677 return FPA_REGS;
11680 /* Handle a special case when computing the offset
11681 of an argument from the frame pointer. */
11682 int
11683 arm_debugger_arg_offset (int value, rtx addr)
11685 rtx insn;
11687 /* We are only interested if dbxout_parms() failed to compute the offset. */
11688 if (value != 0)
11689 return 0;
11691 /* We can only cope with the case where the address is held in a register. */
11692 if (GET_CODE (addr) != REG)
11693 return 0;
11695 /* If we are using the frame pointer to point at the argument, then
11696 an offset of 0 is correct. */
11697 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11698 return 0;
11700 /* If we are using the stack pointer to point at the
11701 argument, then an offset of 0 is correct. */
11702 if ((TARGET_THUMB || !frame_pointer_needed)
11703 && REGNO (addr) == SP_REGNUM)
11704 return 0;
11706 /* Oh dear. The argument is pointed to by a register rather
11707 than being held in a register, or being stored at a known
11708 offset from the frame pointer. Since GDB only understands
11709 those two kinds of argument we must translate the address
11710 held in the register into an offset from the frame pointer.
11711 We do this by searching through the insns for the function
11712 looking to see where this register gets its value. If the
11713 register is initialized from the frame pointer plus an offset
11714 then we are in luck and we can continue, otherwise we give up.
11716 This code is exercised by producing debugging information
11717 for a function with arguments like this:
11719 double func (double a, double b, int c, double d) {return d;}
11721 Without this code the stab for parameter 'd' will be set to
11722 an offset of 0 from the frame pointer, rather than 8. */
11724 /* The if() statement says:
11726 If the insn is a normal instruction
11727 and if the insn is setting the value in a register
11728 and if the register being set is the register holding the address of the argument
11729 and if the address is computed by an addition
11730 that involves adding to a register
11731 which is the frame pointer
11732 a constant integer
11734 then... */
11736 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11738 if ( GET_CODE (insn) == INSN
11739 && GET_CODE (PATTERN (insn)) == SET
11740 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11741 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11742 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11743 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11744 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11747 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11749 break;
11753 if (value == 0)
11755 debug_rtx (addr);
11756 warning ("unable to compute real location of stacked parameter");
11757 value = 8; /* XXX magic hack */
11760 return value;
11763 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11764 do \
11766 if ((MASK) & insn_flags) \
11767 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11768 BUILT_IN_MD, NULL, NULL_TREE); \
11770 while (0)
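/* As a hypothetical expansion of the machinery below: the table entry
   IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) makes
   arm_init_iwmmxt_builtins register __builtin_arm_waddb with type
   v8qi_ftype_v8qi_v8qi through def_mbuiltin, but only when the
   FL_IWMMXT bit is set in insn_flags.  */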
11772 struct builtin_description
11774 const unsigned int mask;
11775 const enum insn_code icode;
11776 const char * const name;
11777 const enum arm_builtins code;
11778 const enum rtx_code comparison;
11779 const unsigned int flag;
11782 static const struct builtin_description bdesc_2arg[] =
11784 #define IWMMXT_BUILTIN(code, string, builtin) \
11785 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11786 ARM_BUILTIN_##builtin, 0, 0 },
11788 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11789 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11790 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11791 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11792 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11793 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11794 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11795 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11796 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11797 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11798 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11799 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11800 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11801 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11802 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11803 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11804 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11805 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11806 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11807 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11808 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11809 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11810 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11811 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11812 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11813 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11814 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11815 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11816 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11817 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11818 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11819 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11820 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11821 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11822 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11823 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11824 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11825 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11826 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11827 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11828 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11829 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11830 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11831 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11832 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11833 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11834 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11835 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11836 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11837 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11838 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11839 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11840 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11841 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11842 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11843 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11844 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11845 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11847 #define IWMMXT_BUILTIN2(code, builtin) \
11848 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11850 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11851 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11852 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11853 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11854 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11855 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11856 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11857 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11858 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11859 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11860 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11861 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11862 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11863 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11864 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11865 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11866 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11867 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11868 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11869 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11870 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11871 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11872 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11873 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11874 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11875 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11876 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11877 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11878 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11879 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11880 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11881 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11884 static const struct builtin_description bdesc_1arg[] =
11886 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11887 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11888 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11889 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11890 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11891 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11892 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11893 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11894 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11895 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11896 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11897 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11898 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11899 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11900 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11901 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11902 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11903 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11906 /* Set up all the iWMMXt builtins. This is
11907 not called if TARGET_IWMMXT is zero. */
11909 static void
11910 arm_init_iwmmxt_builtins (void)
11912 const struct builtin_description * d;
11913 size_t i;
11914 tree endlink = void_list_node;
11916 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11917 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11918 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11920 tree int_ftype_int
11921 = build_function_type (integer_type_node,
11922 tree_cons (NULL_TREE, integer_type_node, endlink));
11923 tree v8qi_ftype_v8qi_v8qi_int
11924 = build_function_type (V8QI_type_node,
11925 tree_cons (NULL_TREE, V8QI_type_node,
11926 tree_cons (NULL_TREE, V8QI_type_node,
11927 tree_cons (NULL_TREE,
11928 integer_type_node,
11929 endlink))));
11930 tree v4hi_ftype_v4hi_int
11931 = build_function_type (V4HI_type_node,
11932 tree_cons (NULL_TREE, V4HI_type_node,
11933 tree_cons (NULL_TREE, integer_type_node,
11934 endlink)));
11935 tree v2si_ftype_v2si_int
11936 = build_function_type (V2SI_type_node,
11937 tree_cons (NULL_TREE, V2SI_type_node,
11938 tree_cons (NULL_TREE, integer_type_node,
11939 endlink)));
11940 tree v2si_ftype_di_di
11941 = build_function_type (V2SI_type_node,
11942 tree_cons (NULL_TREE, long_long_integer_type_node,
11943 tree_cons (NULL_TREE, long_long_integer_type_node,
11944 endlink)));
11945 tree di_ftype_di_int
11946 = build_function_type (long_long_integer_type_node,
11947 tree_cons (NULL_TREE, long_long_integer_type_node,
11948 tree_cons (NULL_TREE, integer_type_node,
11949 endlink)));
11950 tree di_ftype_di_int_int
11951 = build_function_type (long_long_integer_type_node,
11952 tree_cons (NULL_TREE, long_long_integer_type_node,
11953 tree_cons (NULL_TREE, integer_type_node,
11954 tree_cons (NULL_TREE,
11955 integer_type_node,
11956 endlink))));
11957 tree int_ftype_v8qi
11958 = build_function_type (integer_type_node,
11959 tree_cons (NULL_TREE, V8QI_type_node,
11960 endlink));
11961 tree int_ftype_v4hi
11962 = build_function_type (integer_type_node,
11963 tree_cons (NULL_TREE, V4HI_type_node,
11964 endlink));
11965 tree int_ftype_v2si
11966 = build_function_type (integer_type_node,
11967 tree_cons (NULL_TREE, V2SI_type_node,
11968 endlink));
11969 tree int_ftype_v8qi_int
11970 = build_function_type (integer_type_node,
11971 tree_cons (NULL_TREE, V8QI_type_node,
11972 tree_cons (NULL_TREE, integer_type_node,
11973 endlink)));
11974 tree int_ftype_v4hi_int
11975 = build_function_type (integer_type_node,
11976 tree_cons (NULL_TREE, V4HI_type_node,
11977 tree_cons (NULL_TREE, integer_type_node,
11978 endlink)));
11979 tree int_ftype_v2si_int
11980 = build_function_type (integer_type_node,
11981 tree_cons (NULL_TREE, V2SI_type_node,
11982 tree_cons (NULL_TREE, integer_type_node,
11983 endlink)));
11984 tree v8qi_ftype_v8qi_int_int
11985 = build_function_type (V8QI_type_node,
11986 tree_cons (NULL_TREE, V8QI_type_node,
11987 tree_cons (NULL_TREE, integer_type_node,
11988 tree_cons (NULL_TREE,
11989 integer_type_node,
11990 endlink))));
11991 tree v4hi_ftype_v4hi_int_int
11992 = build_function_type (V4HI_type_node,
11993 tree_cons (NULL_TREE, V4HI_type_node,
11994 tree_cons (NULL_TREE, integer_type_node,
11995 tree_cons (NULL_TREE,
11996 integer_type_node,
11997 endlink))));
11998 tree v2si_ftype_v2si_int_int
11999 = build_function_type (V2SI_type_node,
12000 tree_cons (NULL_TREE, V2SI_type_node,
12001 tree_cons (NULL_TREE, integer_type_node,
12002 tree_cons (NULL_TREE,
12003 integer_type_node,
12004 endlink))));
12005 /* Miscellaneous. */
12006 tree v8qi_ftype_v4hi_v4hi
12007 = build_function_type (V8QI_type_node,
12008 tree_cons (NULL_TREE, V4HI_type_node,
12009 tree_cons (NULL_TREE, V4HI_type_node,
12010 endlink)));
12011 tree v4hi_ftype_v2si_v2si
12012 = build_function_type (V4HI_type_node,
12013 tree_cons (NULL_TREE, V2SI_type_node,
12014 tree_cons (NULL_TREE, V2SI_type_node,
12015 endlink)));
12016 tree v2si_ftype_v4hi_v4hi
12017 = build_function_type (V2SI_type_node,
12018 tree_cons (NULL_TREE, V4HI_type_node,
12019 tree_cons (NULL_TREE, V4HI_type_node,
12020 endlink)));
12021 tree v2si_ftype_v8qi_v8qi
12022 = build_function_type (V2SI_type_node,
12023 tree_cons (NULL_TREE, V8QI_type_node,
12024 tree_cons (NULL_TREE, V8QI_type_node,
12025 endlink)));
12026 tree v4hi_ftype_v4hi_di
12027 = build_function_type (V4HI_type_node,
12028 tree_cons (NULL_TREE, V4HI_type_node,
12029 tree_cons (NULL_TREE,
12030 long_long_integer_type_node,
12031 endlink)));
12032 tree v2si_ftype_v2si_di
12033 = build_function_type (V2SI_type_node,
12034 tree_cons (NULL_TREE, V2SI_type_node,
12035 tree_cons (NULL_TREE,
12036 long_long_integer_type_node,
12037 endlink)));
12038 tree void_ftype_int_int
12039 = build_function_type (void_type_node,
12040 tree_cons (NULL_TREE, integer_type_node,
12041 tree_cons (NULL_TREE, integer_type_node,
12042 endlink)));
12043 tree di_ftype_void
12044 = build_function_type (long_long_unsigned_type_node, endlink);
12045 tree di_ftype_v8qi
12046 = build_function_type (long_long_integer_type_node,
12047 tree_cons (NULL_TREE, V8QI_type_node,
12048 endlink));
12049 tree di_ftype_v4hi
12050 = build_function_type (long_long_integer_type_node,
12051 tree_cons (NULL_TREE, V4HI_type_node,
12052 endlink));
12053 tree di_ftype_v2si
12054 = build_function_type (long_long_integer_type_node,
12055 tree_cons (NULL_TREE, V2SI_type_node,
12056 endlink));
12057 tree v2si_ftype_v4hi
12058 = build_function_type (V2SI_type_node,
12059 tree_cons (NULL_TREE, V4HI_type_node,
12060 endlink));
12061 tree v4hi_ftype_v8qi
12062 = build_function_type (V4HI_type_node,
12063 tree_cons (NULL_TREE, V8QI_type_node,
12064 endlink));
12066 tree di_ftype_di_v4hi_v4hi
12067 = build_function_type (long_long_unsigned_type_node,
12068 tree_cons (NULL_TREE,
12069 long_long_unsigned_type_node,
12070 tree_cons (NULL_TREE, V4HI_type_node,
12071 tree_cons (NULL_TREE,
12072 V4HI_type_node,
12073 endlink))));
12075 tree di_ftype_v4hi_v4hi
12076 = build_function_type (long_long_unsigned_type_node,
12077 tree_cons (NULL_TREE, V4HI_type_node,
12078 tree_cons (NULL_TREE, V4HI_type_node,
12079 endlink)));
12081 /* Normal vector binops. */
12082 tree v8qi_ftype_v8qi_v8qi
12083 = build_function_type (V8QI_type_node,
12084 tree_cons (NULL_TREE, V8QI_type_node,
12085 tree_cons (NULL_TREE, V8QI_type_node,
12086 endlink)));
12087 tree v4hi_ftype_v4hi_v4hi
12088 = build_function_type (V4HI_type_node,
12089 tree_cons (NULL_TREE, V4HI_type_node,
12090 tree_cons (NULL_TREE, V4HI_type_node,
12091 endlink)));
12092 tree v2si_ftype_v2si_v2si
12093 = build_function_type (V2SI_type_node,
12094 tree_cons (NULL_TREE, V2SI_type_node,
12095 tree_cons (NULL_TREE, V2SI_type_node,
12096 endlink)));
12097 tree di_ftype_di_di
12098 = build_function_type (long_long_unsigned_type_node,
12099 tree_cons (NULL_TREE, long_long_unsigned_type_node,
12100 tree_cons (NULL_TREE,
12101 long_long_unsigned_type_node,
12102 endlink)));
12104 /* Add all builtins that are more or less simple operations on two
12105 operands. */
12106 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12108 /* Use one of the operands; the target can have a different mode for
12109 mask-generating compares. */
12110 enum machine_mode mode;
12111 tree type;
12113 if (d->name == 0)
12114 continue;
12116 mode = insn_data[d->icode].operand[1].mode;
12118 switch (mode)
12120 case V8QImode:
12121 type = v8qi_ftype_v8qi_v8qi;
12122 break;
12123 case V4HImode:
12124 type = v4hi_ftype_v4hi_v4hi;
12125 break;
12126 case V2SImode:
12127 type = v2si_ftype_v2si_v2si;
12128 break;
12129 case DImode:
12130 type = di_ftype_di_di;
12131 break;
12133 default:
12134 abort ();
12137 def_mbuiltin (d->mask, d->name, type, d->code);
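/* Each entry's insn operand mode selects the matching signature
   above; e.g. an entry whose insn takes V8QImode operands (such as a
   byte-wise vector add) is registered with v8qi_ftype_v8qi_v8qi.  */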
12140 /* Add the remaining MMX insns with somewhat more complicated types. */
12141 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
12142 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
12143 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
12145 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
12146 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
12147 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
12148 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
12149 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
12150 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
12152 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
12153 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
12154 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
12155 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
12156 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
12157 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
12159 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
12160 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
12161 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
12162 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
12163 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
12164 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
12166 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
12167 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
12168 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
12169 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
12170 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
12171 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
12173 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
12175 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
12176 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
12177 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
12178 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
12180 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
12181 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
12182 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
12183 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
12184 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
12185 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
12186 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
12187 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
12188 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
12190 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
12191 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
12192 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
12194 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
12195 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
12196 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
12198 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
12199 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
12200 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
12201 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
12202 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
12203 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
12205 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
12206 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
12207 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
12208 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
12209 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
12210 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
12211 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
12212 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
12213 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
12214 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
12215 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
12216 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
12218 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
12219 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
12220 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
12221 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
12223 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
12224 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
12225 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
12226 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
12227 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
12228 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
12229 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
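/* A hedged usage sketch (not part of the compiler itself): once
   registered, these builtins are callable from user code compiled
   for iWMMXt, e.g.

     typedef short v4hi __attribute__ ((vector_size (8)));
     unsigned long long acc = __builtin_arm_wzero ();
     v4hi y = __builtin_arm_wsllhi (x, 3);

   The vector typedef is an assumption for illustration; user code
   normally reaches these builtins through mmintrin.h wrappers.  */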
12232 static void
12233 arm_init_builtins (void)
12235 if (TARGET_REALLY_IWMMXT)
12236 arm_init_iwmmxt_builtins ();
12239 /* Errors in the source file can cause expand_expr to return const0_rtx
12240 where we expect a vector. To avoid crashing, use one of the vector
12241 clear instructions. */
12243 static rtx
12244 safe_vector_operand (rtx x, enum machine_mode mode)
12246 if (x != const0_rtx)
12247 return x;
12248 x = gen_reg_rtx (mode);
12250 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12251 : gen_rtx_SUBREG (DImode, x, 0)));
12252 return x;
12255 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12257 static rtx
12258 arm_expand_binop_builtin (enum insn_code icode,
12259 tree arglist, rtx target)
12261 rtx pat;
12262 tree arg0 = TREE_VALUE (arglist);
12263 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12264 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12265 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12266 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12267 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12268 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12270 if (VECTOR_MODE_P (mode0))
12271 op0 = safe_vector_operand (op0, mode0);
12272 if (VECTOR_MODE_P (mode1))
12273 op1 = safe_vector_operand (op1, mode1);
12275 if (! target
12276 || GET_MODE (target) != tmode
12277 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12278 target = gen_reg_rtx (tmode);
12280 /* In case the insn wants input operands in modes different from
12281 the result, abort. */
12282 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12283 abort ();
12285 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12286 op0 = copy_to_mode_reg (mode0, op0);
12287 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12288 op1 = copy_to_mode_reg (mode1, op1);
12290 pat = GEN_FCN (icode) (target, op0, op1);
12291 if (! pat)
12292 return 0;
12293 emit_insn (pat);
12294 return target;
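/* Note that the caller-supplied TARGET is reused only when it already
   has the result mode and satisfies the destination predicate;
   otherwise a fresh pseudo register is allocated, so callers need not
   validate TARGET themselves.  */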
12297 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12299 static rtx
12300 arm_expand_unop_builtin (enum insn_code icode,
12301 tree arglist, rtx target, int do_load)
12303 rtx pat;
12304 tree arg0 = TREE_VALUE (arglist);
12305 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12306 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12307 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12309 if (! target
12310 || GET_MODE (target) != tmode
12311 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12312 target = gen_reg_rtx (tmode);
12313 if (do_load)
12314 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12315 else
12317 if (VECTOR_MODE_P (mode0))
12318 op0 = safe_vector_operand (op0, mode0);
12320 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12321 op0 = copy_to_mode_reg (mode0, op0);
12324 pat = GEN_FCN (icode) (target, op0);
12325 if (! pat)
12326 return 0;
12327 emit_insn (pat);
12328 return target;
12331 /* Expand an expression EXP that calls a built-in function,
12332 with result going to TARGET if that's convenient
12333 (and in mode MODE if that's convenient).
12334 SUBTARGET may be used as the target for computing one of EXP's operands.
12335 IGNORE is nonzero if the value is to be ignored. */
12337 static rtx
12338 arm_expand_builtin (tree exp,
12339 rtx target,
12340 rtx subtarget ATTRIBUTE_UNUSED,
12341 enum machine_mode mode ATTRIBUTE_UNUSED,
12342 int ignore ATTRIBUTE_UNUSED)
12344 const struct builtin_description * d;
12345 enum insn_code icode;
12346 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12347 tree arglist = TREE_OPERAND (exp, 1);
12348 tree arg0;
12349 tree arg1;
12350 tree arg2;
12351 rtx op0;
12352 rtx op1;
12353 rtx op2;
12354 rtx pat;
12355 int fcode = DECL_FUNCTION_CODE (fndecl);
12356 size_t i;
12357 enum machine_mode tmode;
12358 enum machine_mode mode0;
12359 enum machine_mode mode1;
12360 enum machine_mode mode2;
12362 switch (fcode)
12364 case ARM_BUILTIN_TEXTRMSB:
12365 case ARM_BUILTIN_TEXTRMUB:
12366 case ARM_BUILTIN_TEXTRMSH:
12367 case ARM_BUILTIN_TEXTRMUH:
12368 case ARM_BUILTIN_TEXTRMSW:
12369 case ARM_BUILTIN_TEXTRMUW:
12370 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12371 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12372 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12373 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12374 : CODE_FOR_iwmmxt_textrmw);
12376 arg0 = TREE_VALUE (arglist);
12377 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12378 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12379 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12380 tmode = insn_data[icode].operand[0].mode;
12381 mode0 = insn_data[icode].operand[1].mode;
12382 mode1 = insn_data[icode].operand[2].mode;
12384 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12385 op0 = copy_to_mode_reg (mode0, op0);
12386 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12388 /* @@@ better error message */
12389 error ("selector must be an immediate");
12390 return gen_reg_rtx (tmode);
12392 if (target == 0
12393 || GET_MODE (target) != tmode
12394 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12395 target = gen_reg_rtx (tmode);
12396 pat = GEN_FCN (icode) (target, op0, op1);
12397 if (! pat)
12398 return 0;
12399 emit_insn (pat);
12400 return target;
12402 case ARM_BUILTIN_TINSRB:
12403 case ARM_BUILTIN_TINSRH:
12404 case ARM_BUILTIN_TINSRW:
12405 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12406 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12407 : CODE_FOR_iwmmxt_tinsrw);
12408 arg0 = TREE_VALUE (arglist);
12409 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12410 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12411 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12412 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12413 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12414 tmode = insn_data[icode].operand[0].mode;
12415 mode0 = insn_data[icode].operand[1].mode;
12416 mode1 = insn_data[icode].operand[2].mode;
12417 mode2 = insn_data[icode].operand[3].mode;
12419 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12420 op0 = copy_to_mode_reg (mode0, op0);
12421 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12422 op1 = copy_to_mode_reg (mode1, op1);
12423 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12425 /* @@@ better error message */
12426 error ("selector must be an immediate");
12427 return const0_rtx;
12429 if (target == 0
12430 || GET_MODE (target) != tmode
12431 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12432 target = gen_reg_rtx (tmode);
12433 pat = GEN_FCN (icode) (target, op0, op1, op2);
12434 if (! pat)
12435 return 0;
12436 emit_insn (pat);
12437 return target;
12439 case ARM_BUILTIN_SETWCX:
12440 arg0 = TREE_VALUE (arglist);
12441 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12442 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12443 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12444 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12445 return 0;
12447 case ARM_BUILTIN_GETWCX:
12448 arg0 = TREE_VALUE (arglist);
12449 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12450 target = gen_reg_rtx (SImode);
12451 emit_insn (gen_iwmmxt_tmrc (target, op0));
12452 return target;
12454 case ARM_BUILTIN_WSHUFH:
12455 icode = CODE_FOR_iwmmxt_wshufh;
12456 arg0 = TREE_VALUE (arglist);
12457 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12458 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12459 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12460 tmode = insn_data[icode].operand[0].mode;
12461 mode1 = insn_data[icode].operand[1].mode;
12462 mode2 = insn_data[icode].operand[2].mode;
12464 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12465 op0 = copy_to_mode_reg (mode1, op0);
12466 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12468 /* @@@ better error message */
12469 error ("mask must be an immediate");
12470 return const0_rtx;
12472 if (target == 0
12473 || GET_MODE (target) != tmode
12474 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12475 target = gen_reg_rtx (tmode);
12476 pat = GEN_FCN (icode) (target, op0, op1);
12477 if (! pat)
12478 return 0;
12479 emit_insn (pat);
12480 return target;
12482 case ARM_BUILTIN_WSADB:
12483 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12484 case ARM_BUILTIN_WSADH:
12485 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12486 case ARM_BUILTIN_WSADBZ:
12487 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12488 case ARM_BUILTIN_WSADHZ:
12489 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12491 /* Several three-argument builtins. */
12492 case ARM_BUILTIN_WMACS:
12493 case ARM_BUILTIN_WMACU:
12494 case ARM_BUILTIN_WALIGN:
12495 case ARM_BUILTIN_TMIA:
12496 case ARM_BUILTIN_TMIAPH:
12497 case ARM_BUILTIN_TMIATT:
12498 case ARM_BUILTIN_TMIATB:
12499 case ARM_BUILTIN_TMIABT:
12500 case ARM_BUILTIN_TMIABB:
12501 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12502 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12503 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12504 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12505 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12506 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12507 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12508 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12509 : CODE_FOR_iwmmxt_walign);
12510 arg0 = TREE_VALUE (arglist);
12511 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12512 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12513 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12514 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12515 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12516 tmode = insn_data[icode].operand[0].mode;
12517 mode0 = insn_data[icode].operand[1].mode;
12518 mode1 = insn_data[icode].operand[2].mode;
12519 mode2 = insn_data[icode].operand[3].mode;
12521 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12522 op0 = copy_to_mode_reg (mode0, op0);
12523 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12524 op1 = copy_to_mode_reg (mode1, op1);
12525 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12526 op2 = copy_to_mode_reg (mode2, op2);
12527 if (target == 0
12528 || GET_MODE (target) != tmode
12529 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12530 target = gen_reg_rtx (tmode);
12531 pat = GEN_FCN (icode) (target, op0, op1, op2);
12532 if (! pat)
12533 return 0;
12534 emit_insn (pat);
12535 return target;
12537 case ARM_BUILTIN_WZERO:
12538 target = gen_reg_rtx (DImode);
12539 emit_insn (gen_iwmmxt_clrdi (target));
12540 return target;
12542 default:
12543 break;
12546 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12547 if (d->code == (const enum arm_builtins) fcode)
12548 return arm_expand_binop_builtin (d->icode, arglist, target);
12550 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12551 if (d->code == (const enum arm_builtins) fcode)
12552 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12554 /* @@@ Should really do something sensible here. */
12555 return NULL_RTX;
12558 /* Recursively search through all of the blocks in a function
12559 checking to see if any of the variables created in that
12560 function match the RTX called 'orig'. If they do then
12561 replace them with the RTX called 'new'. */
12562 static void
12563 replace_symbols_in_block (tree block, rtx orig, rtx new)
12565 for (; block; block = BLOCK_CHAIN (block))
12567 tree sym;
12569 if (!TREE_USED (block))
12570 continue;
12572 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12574 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12575 || DECL_IGNORED_P (sym)
12576 || TREE_CODE (sym) != VAR_DECL
12577 || DECL_EXTERNAL (sym)
12578 || !rtx_equal_p (DECL_RTL (sym), orig)
12580 continue;
12582 SET_DECL_RTL (sym, new);
12585 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12589 /* Return the number (counting from 0) of
12590 the least significant set bit in MASK. */
12592 inline static int
12593 number_of_first_bit_set (int mask)
12595 int bit;
12597 for (bit = 0;
12598 (mask & (1 << bit)) == 0;
12599 ++bit)
12600 continue;
12602 return bit;
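/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3
   is the lowest bit set in binary 11000.  Note that the loop above
   does not terminate for a zero MASK, so callers must pass a nonzero
   value.  */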
12605 /* Generate code to return from a thumb function.
12606 If 'reg_containing_return_addr' is -1, then the return address is
12607 actually on the stack, at the stack pointer. */
12608 static void
12609 thumb_exit (FILE *f, int reg_containing_return_addr)
12611 unsigned regs_available_for_popping;
12612 unsigned regs_to_pop;
12613 int pops_needed;
12614 unsigned available;
12615 unsigned required;
12616 int mode;
12617 int size;
12618 int restore_a4 = FALSE;
12620 /* Compute the registers we need to pop. */
12621 regs_to_pop = 0;
12622 pops_needed = 0;
12624 if (reg_containing_return_addr == -1)
12626 regs_to_pop |= 1 << LR_REGNUM;
12627 ++pops_needed;
12630 if (TARGET_BACKTRACE)
12632 /* Restore the (ARM) frame pointer and stack pointer. */
12633 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12634 pops_needed += 2;
12637 /* If there is nothing to pop then just emit the BX instruction and
12638 return. */
12639 if (pops_needed == 0)
12641 if (current_function_calls_eh_return)
12642 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12644 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12645 return;
12647 /* Otherwise, if we are not supporting interworking, have not created
12648 a backtrace structure, and the function was not entered in ARM mode, then
12649 just pop the return address straight into the PC. */
12650 else if (!TARGET_INTERWORK
12651 && !TARGET_BACKTRACE
12652 && !is_called_in_ARM_mode (current_function_decl)
12653 && !current_function_calls_eh_return)
12655 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12656 return;
12659 /* Find out how many of the (return) argument registers we can corrupt. */
12660 regs_available_for_popping = 0;
12662 /* If returning via __builtin_eh_return, the bottom three registers
12663 all contain information needed for the return. */
12664 if (current_function_calls_eh_return)
12665 size = 12;
12666 else
12668 /* If possible, deduce the registers used from the function's
12669 return value. This is more reliable than examining
12670 regs_ever_live[] because that will be set if the register is
12671 ever used in the function, not just if the register is used
12672 to hold a return value. */
12674 if (current_function_return_rtx != 0)
12675 mode = GET_MODE (current_function_return_rtx);
12676 else
12677 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12679 size = GET_MODE_SIZE (mode);
12681 if (size == 0)
12683 /* In a void function we can use any argument register.
12684 In a function that returns a structure on the stack
12685 we can use the second and third argument registers. */
12686 if (mode == VOIDmode)
12687 regs_available_for_popping =
12688 (1 << ARG_REGISTER (1))
12689 | (1 << ARG_REGISTER (2))
12690 | (1 << ARG_REGISTER (3));
12691 else
12692 regs_available_for_popping =
12693 (1 << ARG_REGISTER (2))
12694 | (1 << ARG_REGISTER (3));
12696 else if (size <= 4)
12697 regs_available_for_popping =
12698 (1 << ARG_REGISTER (2))
12699 | (1 << ARG_REGISTER (3));
12700 else if (size <= 8)
12701 regs_available_for_popping =
12702 (1 << ARG_REGISTER (3));
12705 /* Match registers to be popped with registers into which we pop them. */
12706 for (available = regs_available_for_popping,
12707 required = regs_to_pop;
12708 required != 0 && available != 0;
12709 available &= ~(available & - available),
12710 required &= ~(required & - required))
12711 -- pops_needed;
12713 /* If we have any popping registers left over, remove them. */
12714 if (available > 0)
12715 regs_available_for_popping &= ~available;
12717 /* Otherwise if we need another popping register we can use
12718 the fourth argument register. */
12719 else if (pops_needed)
12721 /* If we have not found any free argument registers and
12722 reg a4 contains the return address, we must move it. */
12723 if (regs_available_for_popping == 0
12724 && reg_containing_return_addr == LAST_ARG_REGNUM)
12726 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12727 reg_containing_return_addr = LR_REGNUM;
12729 else if (size > 12)
12731 /* Register a4 is being used to hold part of the return value,
12732 but we have dire need of a free, low register. */
12733 restore_a4 = TRUE;
12735 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12738 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12740 /* The fourth argument register is available. */
12741 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12743 --pops_needed;
12747 /* Pop as many registers as we can. */
12748 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12749 regs_available_for_popping);
12751 /* Process the registers we popped. */
12752 if (reg_containing_return_addr == -1)
12754 /* The return address was popped into the lowest numbered register. */
12755 regs_to_pop &= ~(1 << LR_REGNUM);
12757 reg_containing_return_addr =
12758 number_of_first_bit_set (regs_available_for_popping);
12760 /* Remove this register from the mask of available registers, so that
12761 the return address will not be corrupted by further pops. */
12762 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12765 /* If we popped other registers then handle them here. */
12766 if (regs_available_for_popping)
12768 int frame_pointer;
12770 /* Work out which register currently contains the frame pointer. */
12771 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12773 /* Move it into the correct place. */
12774 asm_fprintf (f, "\tmov\t%r, %r\n",
12775 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12777 /* (Temporarily) remove it from the mask of popped registers. */
12778 regs_available_for_popping &= ~(1 << frame_pointer);
12779 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12781 if (regs_available_for_popping)
12783 int stack_pointer;
12785 /* We popped the stack pointer as well;
12786 find the register that contains it. */
12787 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12789 /* Move it into the stack register. */
12790 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12792 /* At this point we have popped all necessary registers, so
12793 do not worry about restoring regs_available_for_popping
12794 to its correct value:
12796 assert (pops_needed == 0)
12797 assert (regs_available_for_popping == (1 << frame_pointer))
12798 assert (regs_to_pop == (1 << STACK_POINTER)) */
12800 else
12802 /* Since we have just moved the popped value into the frame
12803 pointer, the popping register is available for reuse, and
12804 we know that we still have the stack pointer left to pop. */
12805 regs_available_for_popping |= (1 << frame_pointer);
12809 /* If we still have registers left on the stack, but we no longer have
12810 any registers into which we can pop them, then we must move the return
12811 address into the link register and make available the register that
12812 contained it. */
12813 if (regs_available_for_popping == 0 && pops_needed > 0)
12815 regs_available_for_popping |= 1 << reg_containing_return_addr;
12817 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12818 reg_containing_return_addr);
12820 reg_containing_return_addr = LR_REGNUM;
12823 /* If we have registers left on the stack then pop some more.
12824 We know that at most we will want to pop FP and SP. */
12825 if (pops_needed > 0)
12827 int popped_into;
12828 int move_to;
12830 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12831 regs_available_for_popping);
12833 /* We have popped either FP or SP.
12834 Move whichever one it is into the correct register. */
12835 popped_into = number_of_first_bit_set (regs_available_for_popping);
12836 move_to = number_of_first_bit_set (regs_to_pop);
12838 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12840 regs_to_pop &= ~(1 << move_to);
12842 --pops_needed;
12845 /* If we still have not popped everything then we must have only
12846 had one register available to us and we are now popping the SP. */
12847 if (pops_needed > 0)
12849 int popped_into;
12851 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12852 regs_available_for_popping);
12854 popped_into = number_of_first_bit_set (regs_available_for_popping);
12856 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12858 /* assert (regs_to_pop == (1 << STACK_POINTER))
12859 assert (pops_needed == 1) */
12863 /* If necessary restore the a4 register. */
12864 if (restore_a4)
12866 if (reg_containing_return_addr != LR_REGNUM)
12868 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12869 reg_containing_return_addr = LR_REGNUM;
12872 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12875 if (current_function_calls_eh_return)
12876 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
12878 /* Return to caller. */
12879 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
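/* As a hedged illustration: for a void function whose return address
   is on the stack and which needs no backtrace structure, the code
   above typically reduces to

       pop     {r0}
       bx      r0

   with r0 standing in for the lowest argument register that was free
   to be corrupted.  */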
12882 /* Emit code to push or pop registers to or from the stack. F is the
12883 assembly file. MASK is the registers to push or pop. PUSH is
12884 nonzero if we should push, and zero if we should pop. For debugging
12885 output, if pushing, adjust CFA_OFFSET by the amount of space added
12886 to the stack. REAL_REGS should have the same number of bits set as
12887 MASK, and will be used instead (in the same order) to describe which
12888 registers were saved - this is used to mark the save slots when we
12889 push high registers after moving them to low registers. */
12890 static void
12891 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12893 int regno;
12894 int lo_mask = mask & 0xFF;
12895 int pushed_words = 0;
12897 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
12899 /* Special case. Do not generate a POP PC statement here; do it in
12900 thumb_exit(). */
12901 thumb_exit (f, -1);
12902 return;
12905 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12907 /* Look at the low registers first. */
12908 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12910 if (lo_mask & 1)
12912 asm_fprintf (f, "%r", regno);
12914 if ((lo_mask & ~1) != 0)
12915 fprintf (f, ", ");
12917 pushed_words++;
12921 if (push && (mask & (1 << LR_REGNUM)))
12923 /* Catch pushing the LR. */
12924 if (mask & 0xFF)
12925 fprintf (f, ", ");
12927 asm_fprintf (f, "%r", LR_REGNUM);
12929 pushed_words++;
12931 else if (!push && (mask & (1 << PC_REGNUM)))
12933 /* Catch popping the PC. */
12934 if (TARGET_INTERWORK || TARGET_BACKTRACE
12935 || current_function_calls_eh_return)
12937 /* The PC is never popped directly; instead
12938 it is popped into r3 and then BX is used. */
12939 fprintf (f, "}\n");
12941 thumb_exit (f, -1);
12943 return;
12945 else
12947 if (mask & 0xFF)
12948 fprintf (f, ", ");
12950 asm_fprintf (f, "%r", PC_REGNUM);
12954 fprintf (f, "}\n");
12956 if (push && pushed_words && dwarf2out_do_frame ())
12958 char *l = dwarf2out_cfi_label ();
12959 int pushed_mask = real_regs;
12961 *cfa_offset += pushed_words * 4;
12962 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12964 pushed_words = 0;
12965 pushed_mask = real_regs;
12966 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12968 if (pushed_mask & 1)
12969 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
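/* For instance, pushing r4, r5 and lr in a prologue emits

       push    {r4, r5, lr}

   and, when frame debug info is wanted, records a 12-byte CFA
   adjustment plus one save slot per register via the dwarf2out calls
   above.  */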
12974 void
12975 thumb_final_prescan_insn (rtx insn)
12977 if (flag_print_asm_name)
12978 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12979 INSN_ADDRESSES (INSN_UID (insn)));
12982 int
12983 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12985 unsigned HOST_WIDE_INT mask = 0xff;
12986 int i;
12988 if (val == 0) /* XXX */
12989 return 0;
12991 for (i = 0; i < 25; i++)
12992 if ((val & (mask << i)) == val)
12993 return 1;
12995 return 0;
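/* For example, 0x00ff0000 is accepted (it is 0xff << 16), whereas
   0x00ff00ff is rejected because its set bits do not fit within a
   single shifted 8-bit window.  */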
12998 /* Returns nonzero if the current function contains,
12999 or might contain, a far jump. */
13000 static int
13001 thumb_far_jump_used_p (void)
13003 rtx insn;
13005 /* This test is only important for leaf functions. */
13006 /* assert (!leaf_function_p ()); */
13008 /* If we have already decided that far jumps may be used,
13009 do not bother checking again, and always return true even if
13010 it turns out that they are not being used. Once we have made
13011 the decision that far jumps are present (and hence that the link
13012 register will be pushed onto the stack) we cannot go back on it. */
13013 if (cfun->machine->far_jump_used)
13014 return 1;
13016 /* If this function is not being called from the prologue/epilogue
13017 generation code then it must be being called from the
13018 INITIAL_ELIMINATION_OFFSET macro. */
13019 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
13021 /* In this case we know that we are being asked about the elimination
13022 of the arg pointer register. If that register is not being used,
13023 then there are no arguments on the stack, and we do not have to
13024 worry that a far jump might force the prologue to push the link
13025 register, changing the stack offsets. In this case we can just
13026 return false, since the presence of far jumps in the function will
13027 not affect stack offsets.
13029 If the arg pointer is live (or if it was live, but has now been
13030 eliminated and so set to dead) then we do have to test to see if
13031 the function might contain a far jump. This test can lead to some
13032 false negatives, since before reload is completed the length of
13033 branch instructions is not known, so gcc defaults to returning their
13034 longest length, which in turn sets the far jump attribute to true.
13036 A false negative will not result in bad code being generated, but it
13037 will result in a needless push and pop of the link register. We
13038 hope that this does not occur too often.
13040 If we need doubleword stack alignment this could affect the other
13041 elimination offsets so we can't risk getting it wrong. */
13042 if (regs_ever_live [ARG_POINTER_REGNUM])
13043 cfun->machine->arg_pointer_live = 1;
13044 else if (!cfun->machine->arg_pointer_live)
13045 return 0;
13048 /* Check to see if the function contains a branch
13049 insn with the far jump attribute set. */
13050 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13052 if (GET_CODE (insn) == JUMP_INSN
13053 /* Ignore tablejump patterns. */
13054 && GET_CODE (PATTERN (insn)) != ADDR_VEC
13055 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
13056 && get_attr_far_jump (insn) == FAR_JUMP_YES
13059 /* Record the fact that we have decided that
13060 the function does use far jumps. */
13061 cfun->machine->far_jump_used = 1;
13062 return 1;
13066 return 0;
13069 /* Return nonzero if FUNC must be entered in ARM mode. */
13070 int
13071 is_called_in_ARM_mode (tree func)
13073 if (TREE_CODE (func) != FUNCTION_DECL)
13074 abort ();
13076 /* Ignore the problem of functions whose address is taken. */
13077 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
13078 return TRUE;
13080 #ifdef ARM_PE
13081 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
13082 #else
13083 return FALSE;
13084 #endif
13087 /* The bits which aren't usefully expanded as rtl. */
13088 const char *
13089 thumb_unexpanded_epilogue (void)
13091 int regno;
13092 int live_regs_mask = 0;
13093 int high_regs_pushed = 0;
13094 int had_to_push_lr;
13095 int size;
13096 int mode;
13098 if (return_used_this_function)
13099 return "";
13101 if (IS_NAKED (arm_current_func_type ()))
13102 return "";
13104 live_regs_mask = thumb_compute_save_reg_mask ();
13105 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13107 /* If possible, deduce the registers used from the function's return value.
13108 This is more reliable than examining regs_ever_live[] because that
13109 will be set if the register is ever used in the function, not just if
13110 the register is used to hold a return value. */
13112 if (current_function_return_rtx != 0)
13113 mode = GET_MODE (current_function_return_rtx);
13114 else
13115 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13117 size = GET_MODE_SIZE (mode);
13119 /* The prolog may have pushed some high registers to use as
13120 work registers, e.g. the testsuite file:
13121 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13122 compiles to produce:
13123 push {r4, r5, r6, r7, lr}
13124 mov r7, r9
13125 mov r6, r8
13126 push {r6, r7}
13127 as part of the prolog. We have to undo that pushing here. */
13129 if (high_regs_pushed)
13131 int mask = live_regs_mask & 0xff;
13132 int next_hi_reg;
13134 /* The available low registers depend on the size of the value we are
13135 returning. */
13136 if (size <= 12)
13137 mask |= 1 << 3;
13138 if (size <= 8)
13139 mask |= 1 << 2;
13141 if (mask == 0)
13142 /* Oh dear! We have no low registers into which we can pop
13143 high registers! */
13144 internal_error
13145 ("no low registers available for popping high registers");
13147 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
13148 if (live_regs_mask & (1 << next_hi_reg))
13149 break;
13151 while (high_regs_pushed)
13153 /* Find lo register(s) into which the high register(s) can
13154 be popped. */
13155 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13157 if (mask & (1 << regno))
13158 high_regs_pushed--;
13159 if (high_regs_pushed == 0)
13160 break;
13163 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
13165 /* Pop the values into the low register(s). */
13166 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
13168 /* Move the value(s) into the high registers. */
13169 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13171 if (mask & (1 << regno))
13173 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
13174 regno);
13176 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
13177 if (live_regs_mask & (1 << next_hi_reg))
13178 break;
13182 live_regs_mask &= ~0x0f00;
13185 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
13186 live_regs_mask &= 0xff;
13188 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
13190 /* Pop the return address into the PC. */
13191 if (had_to_push_lr)
13192 live_regs_mask |= 1 << PC_REGNUM;
13194 /* Either no argument registers were pushed or a backtrace
13195 structure was created which includes an adjusted stack
13196 pointer, so just pop everything. */
13197 if (live_regs_mask)
13198 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13199 live_regs_mask);
13201 /* We have either just popped the return address into the
13202 PC or it was kept in LR for the entire function. */
13203 if (!had_to_push_lr)
13204 thumb_exit (asm_out_file, LR_REGNUM);
13206 else
13208 /* Pop everything but the return address. */
13209 if (live_regs_mask)
13210 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13211 live_regs_mask);
13213 if (had_to_push_lr)
13215 if (size > 12)
13217 /* We have no free low regs, so save one. */
13218 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
13219 LAST_ARG_REGNUM);
13222 /* Get the return address into a temporary register. */
13223 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13224 1 << LAST_ARG_REGNUM);
13226 if (size > 12)
13228 /* Move the return address to lr. */
13229 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
13230 LAST_ARG_REGNUM);
13231 /* Restore the low register. */
13232 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
13233 IP_REGNUM);
13234 regno = LR_REGNUM;
13236 else
13237 regno = LAST_ARG_REGNUM;
13239 else
13240 regno = LR_REGNUM;
13242 /* Remove the argument registers that were pushed onto the stack. */
13243 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13244 SP_REGNUM, SP_REGNUM,
13245 current_function_pretend_args_size);
13247 thumb_exit (asm_out_file, regno);
13250 return "";
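/* A sketch of the undo sequence (assuming r8 and r9 were saved via
   r6 and r7 as in the prolog example above, and that r2 and r3 are
   free): the code above would emit

       pop     {r2, r3}
       mov     r8, r2
       mov     r9, r3

   before popping the remaining low registers and returning.  */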
13253 /* Functions to save and restore machine-specific function data. */
13254 static struct machine_function *
13255 arm_init_machine_status (void)
13257 struct machine_function *machine;
13258 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13260 #if ARM_FT_UNKNOWN != 0
13261 machine->func_type = ARM_FT_UNKNOWN;
13262 #endif
13263 return machine;
13266 /* Return an RTX indicating where the return address to the
13267 calling function can be found. */
13268 rtx
13269 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13271 if (count != 0)
13272 return NULL_RTX;
13274 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13277 /* Do anything needed before RTL is emitted for each function. */
13278 void
13279 arm_init_expanders (void)
13281 /* Arrange to initialize and mark the machine per-function status. */
13282 init_machine_status = arm_init_machine_status;
13284 /* This is to stop the combine pass optimizing away the alignment
13285 adjustment of va_arg. */
13286 /* ??? It is claimed that this should not be necessary. */
13287 if (cfun)
13288 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
13292 /* Like arm_compute_initial_elimination_offset. Simpler because
13293 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13295 HOST_WIDE_INT
13296 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13298 arm_stack_offsets *offsets;
13300 offsets = arm_get_frame_offsets ();
13302 switch (from)
13304 case ARG_POINTER_REGNUM:
13305 switch (to)
13307 case STACK_POINTER_REGNUM:
13308 return offsets->outgoing_args - offsets->saved_args;
13310 case FRAME_POINTER_REGNUM:
13311 return offsets->soft_frame - offsets->saved_args;
13313 case THUMB_HARD_FRAME_POINTER_REGNUM:
13314 case ARM_HARD_FRAME_POINTER_REGNUM:
13315 return offsets->saved_regs - offsets->saved_args;
13317 default:
13318 abort();
13320 break;
13322 case FRAME_POINTER_REGNUM:
13323 switch (to)
13325 case STACK_POINTER_REGNUM:
13326 return offsets->outgoing_args - offsets->soft_frame;
13328 case THUMB_HARD_FRAME_POINTER_REGNUM:
13329 case ARM_HARD_FRAME_POINTER_REGNUM:
13330 return offsets->saved_regs - offsets->soft_frame;
13332 default:
13333 abort();
13335 break;
13337 default:
13338 abort ();
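/* A worked example under assumed offsets: with saved_args == 0,
   saved_regs == 16, soft_frame == 16 and outgoing_args == 32,
   eliminating ARG_POINTER_REGNUM to STACK_POINTER_REGNUM yields
   32 - 0 = 32 bytes, while FRAME_POINTER_REGNUM to
   STACK_POINTER_REGNUM yields 32 - 16 = 16 bytes.  */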
13343 /* Generate the rest of a function's prologue. */
13344 void
13345 thumb_expand_prologue (void)
13347 rtx insn, dwarf;
13349 HOST_WIDE_INT amount;
13350 arm_stack_offsets *offsets;
13351 unsigned long func_type;
13352 int regno;
13353 unsigned long live_regs_mask;
13355 func_type = arm_current_func_type ();
13357 /* Naked functions don't have prologues. */
13358 if (IS_NAKED (func_type))
13359 return;
13361 if (IS_INTERRUPT (func_type))
13363 error ("interrupt Service Routines cannot be coded in Thumb mode");
13364 return;
13367 /* Load the pic register before setting the frame pointer, so we can use r7
13368 as a temporary work register. */
13369 if (flag_pic)
13370 arm_load_pic_register ();
13372 offsets = arm_get_frame_offsets ();
13374 if (frame_pointer_needed)
13376 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13377 stack_pointer_rtx));
13378 RTX_FRAME_RELATED_P (insn) = 1;
13381 live_regs_mask = thumb_compute_save_reg_mask ();
13382 amount = offsets->outgoing_args - offsets->saved_regs;
13383 if (amount)
13385 if (amount < 512)
13387 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13388 GEN_INT (- amount)));
13389 RTX_FRAME_RELATED_P (insn) = 1;
13391 else
13393 rtx reg;
13395 /* The stack decrement is too big for an immediate value in a single
13396 insn. In theory we could issue multiple subtracts, but after
13397 three of them it becomes more space efficient to place the full
13398 value in the constant pool and load into a register. (Also the
13399 ARM debugger really likes to see only one stack decrement per
13400 function). So instead we look for a scratch register into which
13401 we can load the decrement, and then we subtract this from the
13402 stack pointer. Unfortunately on the thumb the only available
13403 scratch registers are the argument registers, and we cannot use
13404 these as they may hold arguments to the function. Instead we
13405 attempt to locate a call preserved register which is used by this
13406 function. If we can find one, then we know that it will have
13407 been pushed at the start of the prologue and so we can corrupt
13408 it now. */
13409 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13410 if (live_regs_mask & (1 << regno)
13411 && !(frame_pointer_needed
13412 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13413 break;
13415 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13417 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13419 /* Choose an arbitrary, non-argument low register. */
13420 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13422 /* Save it by copying it into a high, scratch register. */
13423 emit_insn (gen_movsi (spare, reg));
13424 /* Add a USE to stop propagate_one_insn() from barfing. */
13425 emit_insn (gen_prologue_use (spare));
13427 /* Decrement the stack. */
13428 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13429 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13430 stack_pointer_rtx, reg));
13431 RTX_FRAME_RELATED_P (insn) = 1;
13432 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13433 plus_constant (stack_pointer_rtx,
13434 -amount));
13435 RTX_FRAME_RELATED_P (dwarf) = 1;
13436 REG_NOTES (insn)
13437 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13438 REG_NOTES (insn));
13440 /* Restore the low register's original value. */
13441 emit_insn (gen_movsi (reg, spare));
13443 /* Emit a USE of the restored scratch register, so that flow
13444 analysis will not consider the restore redundant. The
13445 register won't be used again in this function and isn't
13446 restored by the epilogue. */
13447 emit_insn (gen_prologue_use (reg));
13449 else
13451 reg = gen_rtx_REG (SImode, regno);
13453 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13455 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13456 stack_pointer_rtx, reg));
13457 RTX_FRAME_RELATED_P (insn) = 1;
13458 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13459 plus_constant (stack_pointer_rtx,
13460 -amount));
13461 RTX_FRAME_RELATED_P (dwarf) = 1;
13462 REG_NOTES (insn)
13463 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13464 REG_NOTES (insn));
13467 /* If the frame pointer is needed, emit a special barrier that
13468 will prevent the scheduler from moving stores to the frame
13469 before the stack adjustment. */
13470 if (frame_pointer_needed)
13471 emit_insn (gen_stack_tie (stack_pointer_rtx,
13472 hard_frame_pointer_rtx));
13475 if (current_function_profile || TARGET_NO_SCHED_PRO)
13476 emit_insn (gen_blockage ());
13478 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
13479 if (live_regs_mask & 0xff)
13480 cfun->machine->lr_save_eliminated = 0;
13482 /* If the link register is being kept alive, with the return address in it,
13483 then make sure that it does not get reused by the ce2 pass. */
13484 if (cfun->machine->lr_save_eliminated)
13485 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
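/* An illustrative sketch of the large-frame path above (the register
   choice is an assumption): a 1024-byte frame with r4 live would
   decrement the stack roughly as

       ldr     r4, .Lc         @ r4 := -1024, from the constant pool
       add     sp, sp, r4

   relying on r4 having been pushed at the start of the prologue.  */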
13489 void
13490 thumb_expand_epilogue (void)
13492 HOST_WIDE_INT amount;
13493 arm_stack_offsets *offsets;
13494 int regno;
13496 /* Naked functions don't have epilogues. */
13497 if (IS_NAKED (arm_current_func_type ()))
13498 return;
13500 offsets = arm_get_frame_offsets ();
13501 amount = offsets->outgoing_args - offsets->saved_regs;
13503 if (frame_pointer_needed)
13504 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13505 else if (amount)
13507 if (amount < 512)
13508 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13509 GEN_INT (amount)));
13510 else
13512 /* r3 is always free in the epilogue. */
13513 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13515 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13516 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13520 /* Emit a USE (stack_pointer_rtx), so that
13521 the stack adjustment will not be deleted. */
13522 emit_insn (gen_prologue_use (stack_pointer_rtx));
13524 if (current_function_profile || TARGET_NO_SCHED_PRO)
13525 emit_insn (gen_blockage ());
13527 /* Emit a clobber for each insn that will be restored in the epilogue,
13528 so that flow2 will get register lifetimes correct. */
13529 for (regno = 0; regno < 13; regno++)
13530 if (regs_ever_live[regno] && !call_used_regs[regno])
13531 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13533 if (! regs_ever_live[LR_REGNUM])
13534 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13537 static void
13538 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13540 int live_regs_mask = 0;
13541 int l_mask;
13542 int high_regs_pushed = 0;
13543 int cfa_offset = 0;
13544 int regno;
13546 if (IS_NAKED (arm_current_func_type ()))
13547 return;
13549 if (is_called_in_ARM_mode (current_function_decl))
13551 const char * name;
13553 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13554 abort ();
13555 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13556 abort ();
13557 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13559 /* Generate code sequence to switch us into Thumb mode. */
13560 /* The .code 32 directive has already been emitted by
13561 ASM_DECLARE_FUNCTION_NAME. */
13562 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13563 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13565 /* Generate a label, so that the debugger will notice the
13566 change in instruction sets. This label is also used by
13567 the assembler to bypass the ARM code when this function
13568 is called from a Thumb encoded function elsewhere in the
13569 same file. Hence the definition of STUB_NAME here must
13570 agree with the definition in gas/config/tc-arm.c. */
13572 #define STUB_NAME ".real_start_of"
13574 fprintf (f, "\t.code\t16\n");
13575 #ifdef ARM_PE
13576 if (arm_dllexport_name_p (name))
13577 name = arm_strip_name_encoding (name);
13578 #endif
13579 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13580 fprintf (f, "\t.thumb_func\n");
13581 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13584 if (current_function_pretend_args_size)
13586 if (cfun->machine->uses_anonymous_args)
13588 int num_pushes;
13590 fprintf (f, "\tpush\t{");
13592 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13594 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13595 regno <= LAST_ARG_REGNUM;
13596 regno++)
13597 asm_fprintf (f, "%r%s", regno,
13598 regno == LAST_ARG_REGNUM ? "" : ", ");
13600 fprintf (f, "}\n");
13602 else
13603 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13604 SP_REGNUM, SP_REGNUM,
13605 current_function_pretend_args_size);
13607 /* We don't need to record the stores for unwinding (would it
13608 help the debugger any if we did?), but record the change in
13609 the stack pointer. */
13610 if (dwarf2out_do_frame ())
13612 char *l = dwarf2out_cfi_label ();
13613 cfa_offset = cfa_offset + current_function_pretend_args_size;
13614 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13618 live_regs_mask = thumb_compute_save_reg_mask ();
13619 /* Just low regs and lr. */
13620 l_mask = live_regs_mask & 0x40ff;
13622 if (TARGET_BACKTRACE)
13624 int offset;
13625 int work_register;
13627 /* We have been asked to create a stack backtrace structure.
13628 The code looks like this:
13630 0 .align 2
13631 0 func:
13632 0 sub SP, #16 Reserve space for 4 registers.
13633 2 push {R7} Push low registers.
13634 4 add R7, SP, #20 Get the stack pointer before the push.
13635 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13636 8 mov R7, PC Get hold of the start of this code plus 12.
13637 10 str R7, [SP, #16] Store it.
13638 12 mov R7, FP Get hold of the current frame pointer.
13639 14 str R7, [SP, #4] Store it.
13640 16 mov R7, LR Get hold of the current return address.
13641 18 str R7, [SP, #12] Store it.
13642 20 add R7, SP, #16 Point at the start of the backtrace structure.
13643 22 mov FP, R7 Put this value into the frame pointer. */
13645 work_register = thumb_find_work_register (live_regs_mask);
13647 asm_fprintf
13648 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13649 SP_REGNUM, SP_REGNUM);
13651 if (dwarf2out_do_frame ())
13653 char *l = dwarf2out_cfi_label ();
13654 cfa_offset = cfa_offset + 16;
13655 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13658 if (l_mask)
13660 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13661 offset = bit_count (l_mask);
13663 else
13664 offset = 0;
13666 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13667 offset + 16 + current_function_pretend_args_size);
13669 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13670 offset + 4);
13672 /* Make sure that the instruction fetching the PC is in the right place
13673 to calculate "start of backtrace creation code + 12". */
13674 if (l_mask)
13676 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13677 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13678 offset + 12);
13679 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13680 ARM_HARD_FRAME_POINTER_REGNUM);
13681 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13682 offset);
13684 else
13686 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13687 ARM_HARD_FRAME_POINTER_REGNUM);
13688 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13689 offset);
13690 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13691 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13692 offset + 12);
13695 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13696 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13697 offset + 8);
13698 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13699 offset + 12);
13700 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13701 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13703 else if (l_mask)
13704 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
13706 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
13708 if (high_regs_pushed)
13710 int pushable_regs = 0;
13711 int next_hi_reg;
13713 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13714 if (live_regs_mask & (1 << next_hi_reg))
13715 break;
13717 pushable_regs = l_mask & 0xff;
13719 if (pushable_regs == 0)
13720 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
13722 while (high_regs_pushed > 0)
13724 int real_regs_mask = 0;
13726 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13728 if (pushable_regs & (1 << regno))
13730 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13732 high_regs_pushed--;
13733 real_regs_mask |= (1 << next_hi_reg);
13735 if (high_regs_pushed)
13737 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13738 next_hi_reg--)
13739 if (live_regs_mask & (1 << next_hi_reg))
13740 break;
13742 else
13744 pushable_regs &= ~((1 << regno) - 1);
13745 break;
13750 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
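/* Worked example (register choices illustrative): if r8 and r10 are
   the live high registers and r4/r5 are low registers already saved
   by the push above, a single pass of the loop emits
	mov	r5, r10
	mov	r4, r8
	push	{r4, r5}
   with real_regs_mask recording r8 and r10, so the unwind info
   describes the registers actually saved.  */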
13755 /* Handle the case of a double word load into a low register from
13756 a computed memory address. The computed address may involve a
13757 register which is overwritten by the load. */
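/* For illustration of the hazard: loading the low word first for a
   64-bit load into r0/r1 from the address held in r0 would give
	ldr	r0, [r0]	@ clobbers the base ...
	ldr	r1, [r0, #4]	@ ... so this reads from the wrong address
   which is why the REG case below loads the high word first whenever
   the destination overlaps the base register.  */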
13758 const char *
13759 thumb_load_double_from_address (rtx *operands)
13761 rtx addr;
13762 rtx base;
13763 rtx offset;
13764 rtx arg1;
13765 rtx arg2;
13767 if (GET_CODE (operands[0]) != REG)
13768 abort ();
13770 if (GET_CODE (operands[1]) != MEM)
13771 abort ();
13773 /* Get the memory address. */
13774 addr = XEXP (operands[1], 0);
13776 /* Work out how the memory address is computed. */
13777 switch (GET_CODE (addr))
13779 case REG:
13780 operands[2] = gen_rtx_MEM (SImode,
13781 plus_constant (XEXP (operands[1], 0), 4));
13783 if (REGNO (operands[0]) == REGNO (addr))
13785 output_asm_insn ("ldr\t%H0, %2", operands);
13786 output_asm_insn ("ldr\t%0, %1", operands);
13788 else
13790 output_asm_insn ("ldr\t%0, %1", operands);
13791 output_asm_insn ("ldr\t%H0, %2", operands);
13793 break;
13795 case CONST:
13796 /* Compute <address> + 4 for the high order load. */
13797 operands[2] = gen_rtx_MEM (SImode,
13798 plus_constant (XEXP (operands[1], 0), 4));
13800 output_asm_insn ("ldr\t%0, %1", operands);
13801 output_asm_insn ("ldr\t%H0, %2", operands);
13802 break;
13804 case PLUS:
13805 arg1 = XEXP (addr, 0);
13806 arg2 = XEXP (addr, 1);
13808 if (CONSTANT_P (arg1))
13809 base = arg2, offset = arg1;
13810 else
13811 base = arg1, offset = arg2;
13813 if (GET_CODE (base) != REG)
13814 abort ();
13816 /* Catch the case of <address> = <reg> + <reg> */
13817 if (GET_CODE (offset) == REG)
13819 int reg_offset = REGNO (offset);
13820 int reg_base = REGNO (base);
13821 int reg_dest = REGNO (operands[0]);
13823 /* Add the base and offset registers together into the
13824 higher destination register. */
13825 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13826 reg_dest + 1, reg_base, reg_offset);
13828 /* Load the lower destination register from the address in
13829 the higher destination register. */
13830 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13831 reg_dest, reg_dest + 1);
13833 /* Load the higher destination register from its own address
13834 plus 4. */
13835 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13836 reg_dest + 1, reg_dest + 1);
13838 else
13840 /* Compute <address> + 4 for the high order load. */
13841 operands[2] = gen_rtx_MEM (SImode,
13842 plus_constant (XEXP (operands[1], 0), 4));
13844 /* If the computed address is held in the low order register
13845 then load the high order register first, otherwise always
13846 load the low order register first. */
13847 if (REGNO (operands[0]) == REGNO (base))
13849 output_asm_insn ("ldr\t%H0, %2", operands);
13850 output_asm_insn ("ldr\t%0, %1", operands);
13852 else
13854 output_asm_insn ("ldr\t%0, %1", operands);
13855 output_asm_insn ("ldr\t%H0, %2", operands);
13858 break;
13860 case LABEL_REF:
13861 /* With no registers to worry about we can just load the value
13862 directly. */
13863 operands[2] = gen_rtx_MEM (SImode,
13864 plus_constant (XEXP (operands[1], 0), 4));
13866 output_asm_insn ("ldr\t%H0, %2", operands);
13867 output_asm_insn ("ldr\t%0, %1", operands);
13868 break;
13870 default:
13871 abort ();
13872 break;
13875 return "";
13878 const char *
13879 thumb_output_move_mem_multiple (int n, rtx *operands)
13881 rtx tmp;
13883 switch (n)
13885 case 2:
13886 if (REGNO (operands[4]) > REGNO (operands[5]))
13888 tmp = operands[4];
13889 operands[4] = operands[5];
13890 operands[5] = tmp;
13892 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13893 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13894 break;
13896 case 3:
13897 if (REGNO (operands[4]) > REGNO (operands[5]))
13899 tmp = operands[4];
13900 operands[4] = operands[5];
13901 operands[5] = tmp;
13903 if (REGNO (operands[5]) > REGNO (operands[6]))
13905 tmp = operands[5];
13906 operands[5] = operands[6];
13907 operands[6] = tmp;
13909 if (REGNO (operands[4]) > REGNO (operands[5]))
13911 tmp = operands[4];
13912 operands[4] = operands[5];
13913 operands[5] = tmp;
13916 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13917 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13918 break;
13920 default:
13921 abort ();
13924 return "";
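/* LDM/STM transfer the lowest-numbered register to/from the lowest
   address, so the register lists must be emitted in ascending order;
   the compare-and-swap sequences above sort the scratch registers.
   E.g. scratch registers r5, r3, r4 in the three-word case become
	ldmia	r1!, {r3, r4, r5}
	stmia	r0!, {r3, r4, r5}
   (register numbers illustrative).  */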
13927 /* Routines for generating rtl. */
13928 void
13929 thumb_expand_movmemqi (rtx *operands)
13931 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13932 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13933 HOST_WIDE_INT len = INTVAL (operands[2]);
13934 HOST_WIDE_INT offset = 0;
13936 while (len >= 12)
13938 emit_insn (gen_movmem12b (out, in, out, in));
13939 len -= 12;
13942 if (len >= 8)
13944 emit_insn (gen_movmem8b (out, in, out, in));
13945 len -= 8;
13948 if (len >= 4)
13950 rtx reg = gen_reg_rtx (SImode);
13951 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13952 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13953 len -= 4;
13954 offset += 4;
13957 if (len >= 2)
13959 rtx reg = gen_reg_rtx (HImode);
13960 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13961 plus_constant (in, offset))));
13962 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13963 reg));
13964 len -= 2;
13965 offset += 2;
13968 if (len)
13970 rtx reg = gen_reg_rtx (QImode);
13971 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13972 plus_constant (in, offset))));
13973 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13974 reg));
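/* Worked example: a 27-byte copy decomposes greedily into two
   12-byte ldmia/stmia blocks (the writeback advances both pointers),
   no 8- or 4-byte piece, then one halfword at offset 0 and one byte
   at offset 2 of the remainder: 24 + 2 + 1 == 27.  */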
13978 int
13979 thumb_cmp_operand (rtx op, enum machine_mode mode)
13981 return ((GET_CODE (op) == CONST_INT
13982 && INTVAL (op) < 256
13983 && INTVAL (op) >= 0)
13984 || s_register_operand (op, mode));
13987 int
13988 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
13990 return (GET_CODE (op) == CONST_INT
13991 && INTVAL (op) < 0
13992 && INTVAL (op) > -256);
13995 /* Return TRUE if a result can be stored in OP without clobbering the
13996 condition code register. Prior to reload we only accept a
13997 register. After reload we have to be able to handle memory as
13998 well, since a pseudo may not get a hard reg and reload cannot
13999 handle output-reloads on jump insns.
14001 We could possibly handle mem before reload as well, but that might
14002 complicate things with the need to handle increment
14003 side-effects. */
14005 int
14006 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
14008 return (s_register_operand (op, mode)
14009 || ((reload_in_progress || reload_completed)
14010 && memory_operand (op, mode)));
14013 /* Handle storing a half-word to memory during reload. */
14014 void
14015 thumb_reload_out_hi (rtx *operands)
14017 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
14020 /* Handle reading a half-word from memory during reload. */
14021 void
14022 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
14024 abort ();
14027 /* Return the length of a function name prefix
14028 that starts with the character C. */
14029 static int
14030 arm_get_strip_length (int c)
14032 switch (c)
14034 ARM_NAME_ENCODING_LENGTHS
14035 default: return 0;
14039 /* Return a pointer to a function's name with any
14040 and all prefix encodings stripped from it. */
14041 const char *
14042 arm_strip_name_encoding (const char *name)
14044 int skip;
14046 while ((skip = arm_get_strip_length (* name)))
14047 name += skip;
14049 return name;
14052 /* If there is a '*' anywhere in the name's prefix, then
14053 emit the stripped name verbatim, otherwise prepend an
14054 underscore if leading underscores are being used. */
14055 void
14056 arm_asm_output_labelref (FILE *stream, const char *name)
14058 int skip;
14059 int verbatim = 0;
14061 while ((skip = arm_get_strip_length (* name)))
14063 verbatim |= (*name == '*');
14064 name += skip;
14067 if (verbatim)
14068 fputs (name, stream);
14069 else
14070 asm_fprintf (stream, "%U%s", name);
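/* Example, assuming '*' is among the prefixes covered by
   ARM_NAME_ENCODING_LENGTHS (as on typical configurations): a name
   encoded as "*foo" is emitted verbatim as "foo", while a plain
   "foo" is emitted as "_foo" on targets whose user labels carry a
   leading underscore (the %U prefix).  */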
14073 rtx aof_pic_label;
14075 #ifdef AOF_ASSEMBLER
14076 /* Special functions only needed when producing AOF syntax assembler. */
14078 struct pic_chain
14080 struct pic_chain * next;
14081 const char * symname;
14084 static struct pic_chain * aof_pic_chain = NULL;
14086 rtx
14087 aof_pic_entry (rtx x)
14089 struct pic_chain ** chainp;
14090 int offset;
14092 if (aof_pic_label == NULL_RTX)
14094 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
14097 for (offset = 0, chainp = &aof_pic_chain; *chainp;
14098 offset += 4, chainp = &(*chainp)->next)
14099 if ((*chainp)->symname == XSTR (x, 0))
14100 return plus_constant (aof_pic_label, offset);
14102 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
14103 (*chainp)->next = NULL;
14104 (*chainp)->symname = XSTR (x, 0);
14105 return plus_constant (aof_pic_label, offset);
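/* Worked example: with symbols A and B already chained at offsets 0
   and 4, a request for a new symbol C falls out of the loop with
   offset == 8, appends C to the chain and returns x$adcons + 8;
   aof_dump_pic_table below then emits one DCD per chained symbol in
   the same order.  */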
14108 void
14109 aof_dump_pic_table (FILE *f)
14111 struct pic_chain * chain;
14113 if (aof_pic_chain == NULL)
14114 return;
14116 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
14117 PIC_OFFSET_TABLE_REGNUM,
14118 PIC_OFFSET_TABLE_REGNUM);
14119 fputs ("|x$adcons|\n", f);
14121 for (chain = aof_pic_chain; chain; chain = chain->next)
14123 fputs ("\tDCD\t", f);
14124 assemble_name (f, chain->symname);
14125 fputs ("\n", f);
14129 int arm_text_section_count = 1;
14131 char *
14132 aof_text_section (void)
14134 static char buf[100];
14135 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
14136 arm_text_section_count++);
14137 if (flag_pic)
14138 strcat (buf, ", PIC, REENTRANT");
14139 return buf;
14142 static int arm_data_section_count = 1;
14144 char *
14145 aof_data_section (void)
14147 static char buf[100];
14148 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
14149 return buf;
14152 /* The AOF assembler is religiously strict about declarations of
14153 imported and exported symbols, so that it is impossible to declare
14154 a function as imported near the beginning of the file, and then to
14155 export it later on. It is, however, possible to delay the decision
14156 until all the functions in the file have been compiled. To get
14157 around this, we maintain a list of the imports and exports, and
14158 delete from it any that are subsequently defined. At the end of
14159 compilation we spit the remainder of the list out before the END
14160 directive. */
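/* Sketch of the lifecycle: a reference to an undefined "foo" calls
   aof_add_import ("foo"); if "foo" is later defined in this file,
   aof_delete_import ("foo") unlinks it again; whatever survives on
   the list when aof_file_end runs is printed as IMPORT directives
   just before the END directive.  */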
14162 struct import
14164 struct import * next;
14165 const char * name;
14168 static struct import * imports_list = NULL;
14170 void
14171 aof_add_import (const char *name)
14173 struct import * new;
14175 for (new = imports_list; new; new = new->next)
14176 if (new->name == name)
14177 return;
14179 new = (struct import *) xmalloc (sizeof (struct import));
14180 new->next = imports_list;
14181 imports_list = new;
14182 new->name = name;
14185 void
14186 aof_delete_import (const char *name)
14188 struct import ** old;
14190 for (old = &imports_list; *old; old = & (*old)->next)
14192 if ((*old)->name == name)
14194 *old = (*old)->next;
14195 return;
14200 int arm_main_function = 0;
14202 static void
14203 aof_dump_imports (FILE *f)
14205 /* The AOF assembler needs this to cause the startup code to be extracted
14206 from the library. Bringing in __main causes the whole thing to work
14207 automagically. */
14208 if (arm_main_function)
14210 text_section ();
14211 fputs ("\tIMPORT __main\n", f);
14212 fputs ("\tDCD __main\n", f);
14215 /* Now dump the remaining imports. */
14216 while (imports_list)
14218 fprintf (f, "\tIMPORT\t");
14219 assemble_name (f, imports_list->name);
14220 fputc ('\n', f);
14221 imports_list = imports_list->next;
14225 static void
14226 aof_globalize_label (FILE *stream, const char *name)
14228 default_globalize_label (stream, name);
14229 if (! strcmp (name, "main"))
14230 arm_main_function = 1;
14233 static void
14234 aof_file_start (void)
14236 fputs ("__r0\tRN\t0\n", asm_out_file);
14237 fputs ("__a1\tRN\t0\n", asm_out_file);
14238 fputs ("__a2\tRN\t1\n", asm_out_file);
14239 fputs ("__a3\tRN\t2\n", asm_out_file);
14240 fputs ("__a4\tRN\t3\n", asm_out_file);
14241 fputs ("__v1\tRN\t4\n", asm_out_file);
14242 fputs ("__v2\tRN\t5\n", asm_out_file);
14243 fputs ("__v3\tRN\t6\n", asm_out_file);
14244 fputs ("__v4\tRN\t7\n", asm_out_file);
14245 fputs ("__v5\tRN\t8\n", asm_out_file);
14246 fputs ("__v6\tRN\t9\n", asm_out_file);
14247 fputs ("__sl\tRN\t10\n", asm_out_file);
14248 fputs ("__fp\tRN\t11\n", asm_out_file);
14249 fputs ("__ip\tRN\t12\n", asm_out_file);
14250 fputs ("__sp\tRN\t13\n", asm_out_file);
14251 fputs ("__lr\tRN\t14\n", asm_out_file);
14252 fputs ("__pc\tRN\t15\n", asm_out_file);
14253 fputs ("__f0\tFN\t0\n", asm_out_file);
14254 fputs ("__f1\tFN\t1\n", asm_out_file);
14255 fputs ("__f2\tFN\t2\n", asm_out_file);
14256 fputs ("__f3\tFN\t3\n", asm_out_file);
14257 fputs ("__f4\tFN\t4\n", asm_out_file);
14258 fputs ("__f5\tFN\t5\n", asm_out_file);
14259 fputs ("__f6\tFN\t6\n", asm_out_file);
14260 fputs ("__f7\tFN\t7\n", asm_out_file);
14261 text_section ();
14264 static void
14265 aof_file_end (void)
14267 if (flag_pic)
14268 aof_dump_pic_table (asm_out_file);
14269 aof_dump_imports (asm_out_file);
14270 fputs ("\tEND\n", asm_out_file);
14272 #endif /* AOF_ASSEMBLER */
14274 #ifdef OBJECT_FORMAT_ELF
14275 /* Switch to an arbitrary section NAME with attributes as specified
14276 by FLAGS. ALIGN specifies any known alignment requirements for
14277 the section; 0 if the default should be used.
14279 Differs from the default elf version only in the prefix character
14280 used before the section type. */
14282 static void
14283 arm_elf_asm_named_section (const char *name, unsigned int flags)
14285 char flagchars[10], *f = flagchars;
14287 if (! named_section_first_declaration (name))
14289 fprintf (asm_out_file, "\t.section\t%s\n", name);
14290 return;
14293 if (!(flags & SECTION_DEBUG))
14294 *f++ = 'a';
14295 if (flags & SECTION_WRITE)
14296 *f++ = 'w';
14297 if (flags & SECTION_CODE)
14298 *f++ = 'x';
14299 if (flags & SECTION_SMALL)
14300 *f++ = 's';
14301 if (flags & SECTION_MERGE)
14302 *f++ = 'M';
14303 if (flags & SECTION_STRINGS)
14304 *f++ = 'S';
14305 if (flags & SECTION_TLS)
14306 *f++ = 'T';
14307 *f = '\0';
14309 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
14311 if (!(flags & SECTION_NOTYPE))
14313 const char *type;
14315 if (flags & SECTION_BSS)
14316 type = "nobits";
14317 else
14318 type = "progbits";
14320 fprintf (asm_out_file, ",%%%s", type);
14322 if (flags & SECTION_ENTSIZE)
14323 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
14326 putc ('\n', asm_out_file);
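/* Example output (section names illustrative): a code section gets
	.section .text.foo,"ax",%progbits
   and a mergeable string section with entity size 1 gets
	.section .rodata.str1.1,"aMS",%progbits,1
   with '%' before the type where the default ELF hook would write
   '@', '@' being the comment character in ARM assembler syntax.  */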
14328 #endif
14330 #ifndef ARM_PE
14331 /* Symbols in the text segment can be accessed without indirecting via the
14332 constant pool; it may take an extra binary operation, but this is still
14333 faster than indirecting via memory. Don't do this when not optimizing,
14334 since we won't be calculating all of the offsets necessary to do this
14335 simplification. */
14337 static void
14338 arm_encode_section_info (tree decl, rtx rtl, int first)
14340 /* This doesn't work with AOF syntax, since the string table may be in
14341 a different AREA. */
14342 #ifndef AOF_ASSEMBLER
14343 if (optimize > 0 && TREE_CONSTANT (decl))
14344 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
14345 #endif
14347 /* If we are referencing a function that is weak then encode a long call
14348 flag in the function name, otherwise if the function is static or
14349 known to be defined in this file then encode a short call flag. */
14350 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
14352 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
14353 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
14354 else if (! TREE_PUBLIC (decl))
14355 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
14358 #endif /* !ARM_PE */
14360 static void
14361 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
14363 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
14364 && !strcmp (prefix, "L"))
14366 arm_ccfsm_state = 0;
14367 arm_target_insn = NULL;
14369 default_internal_label (stream, prefix, labelno);
14372 /* Output code to add DELTA to the first argument, and then jump
14373 to FUNCTION. Used for C++ multiple inheritance. */
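/* For illustration (ARM mode, with r0 assumed to hold the this
   pointer): DELTA == 0x1234 is split below into chunks of at most
   eight significant bits at even shifts, each a valid ARM immediate:
	add	r0, r0, #564	@ 0x234
	add	r0, r0, #4096	@ 0x1000
   rather than one out-of-range immediate.  */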
14374 static void
14375 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
14376 HOST_WIDE_INT delta,
14377 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
14378 tree function)
14380 static int thunk_label = 0;
14381 char label[256];
14382 int mi_delta = delta;
14383 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
14384 int shift = 0;
14385 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
14386 ? 1 : 0);
14387 if (mi_delta < 0)
14388 mi_delta = - mi_delta;
14389 if (TARGET_THUMB)
14391 int labelno = thunk_label++;
14392 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
14393 fputs ("\tldr\tr12, ", file);
14394 assemble_name (file, label);
14395 fputc ('\n', file);
14397 while (mi_delta != 0)
14399 if ((mi_delta & (3 << shift)) == 0)
14400 shift += 2;
14401 else
14403 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
14404 mi_op, this_regno, this_regno,
14405 mi_delta & (0xff << shift));
14406 mi_delta &= ~(0xff << shift);
14407 shift += 8;
14410 if (TARGET_THUMB)
14412 fprintf (file, "\tbx\tr12\n");
14413 ASM_OUTPUT_ALIGN (file, 2);
14414 assemble_name (file, label);
14415 fputs (":\n", file);
14416 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
14418 else
14420 fputs ("\tb\t", file);
14421 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
14422 if (NEED_PLT_RELOC)
14423 fputs ("(PLT)", file);
14424 fputc ('\n', file);
14428 int
14429 arm_emit_vector_const (FILE *file, rtx x)
14431 int i;
14432 const char * pattern;
14434 if (GET_CODE (x) != CONST_VECTOR)
14435 abort ();
14437 switch (GET_MODE (x))
14439 case V2SImode: pattern = "%08x"; break;
14440 case V4HImode: pattern = "%04x"; break;
14441 case V8QImode: pattern = "%02x"; break;
14442 default: abort ();
14445 fprintf (file, "0x");
14446 for (i = CONST_VECTOR_NUNITS (x); i--;)
14448 rtx element;
14450 element = CONST_VECTOR_ELT (x, i);
14451 fprintf (file, pattern, INTVAL (element));
14454 return 1;
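/* Worked example: a V4HImode vector holding the elements 1, 2, 3, 4
   is printed highest element first, giving "0x0004000300020001".  */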
14457 const char *
14458 arm_output_load_gr (rtx *operands)
14460 rtx reg;
14461 rtx offset;
14462 rtx wcgr;
14463 rtx sum;
14465 if (GET_CODE (operands [1]) != MEM
14466 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
14467 || GET_CODE (reg = XEXP (sum, 0)) != REG
14468 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
14469 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
14470 return "wldrw%?\t%0, %1";
14472 /* Fix up an out-of-range load of a GR register. */
14473 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
14474 wcgr = operands[0];
14475 operands[0] = reg;
14476 output_asm_insn ("ldr%?\t%0, %1", operands);
14478 operands[0] = wcgr;
14479 operands[1] = reg;
14480 output_asm_insn ("tmcr%?\t%0, %1", operands);
14481 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
14483 return "";
14486 static rtx
14487 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
14488 int incoming ATTRIBUTE_UNUSED)
14490 #if 0
14491 /* FIXME: The ARM backend has special code to handle structure
14492 returns, and will reserve its own hidden first argument. So
14493 if this macro is enabled a *second* hidden argument will be
14494 reserved, which will break binary compatibility with old
14495 toolchains and also thunk handling. One day this should be
14496 fixed. */
14497 return 0;
14498 #else
14499 /* Register in which the address to store a structure value
14500 is passed to a function. */
14501 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
14502 #endif
14505 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14507 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14508 named arg and all anonymous args onto the stack.
14509 XXX I know the prologue shouldn't be pushing registers, but it is faster
14510 that way. */
14512 static void
14513 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
14514 enum machine_mode mode ATTRIBUTE_UNUSED,
14515 tree type ATTRIBUTE_UNUSED,
14516 int *pretend_size,
14517 int second_time ATTRIBUTE_UNUSED)
14519 cfun->machine->uses_anonymous_args = 1;
14520 if (cum->nregs < NUM_ARG_REGS)
14521 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
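/* Worked example: with one named argument consuming r0 of the four
   argument registers, cum->nregs == 1 and *pretend_size becomes
   (4 - 1) * 4 == 12, so the prologue pushes r1-r3 and the anonymous
   arguments become contiguous with any stack-passed ones.  */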
14524 /* Return nonzero if the CONSUMER instruction (a store) does not need
14525 PRODUCER's value to calculate the address. */
14527 int
14528 arm_no_early_store_addr_dep (rtx producer, rtx consumer)
14530 rtx value = PATTERN (producer);
14531 rtx addr = PATTERN (consumer);
14533 if (GET_CODE (value) == COND_EXEC)
14534 value = COND_EXEC_CODE (value);
14535 if (GET_CODE (value) == PARALLEL)
14536 value = XVECEXP (value, 0, 0);
14537 value = XEXP (value, 0);
14538 if (GET_CODE (addr) == COND_EXEC)
14539 addr = COND_EXEC_CODE (addr);
14540 if (GET_CODE (addr) == PARALLEL)
14541 addr = XVECEXP (addr, 0, 0);
14542 addr = XEXP (addr, 0);
14544 return !reg_overlap_mentioned_p (value, addr);
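/* Example: if PRODUCER sets r1 and CONSUMER is "str r1, [r4]", the
   address does not use r1 and we return nonzero (the stored value is
   needed late and can be forwarded); for "str r5, [r1]" the address
   depends on r1 and we return zero.  */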
14547 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14548 have an early register shift value or amount dependency on the
14549 result of PRODUCER. */
14551 int
14552 arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
14554 rtx value = PATTERN (producer);
14555 rtx op = PATTERN (consumer);
14556 rtx early_op;
14558 if (GET_CODE (value) == COND_EXEC)
14559 value = COND_EXEC_CODE (value);
14560 if (GET_CODE (value) == PARALLEL)
14561 value = XVECEXP (value, 0, 0);
14562 value = XEXP (value, 0);
14563 if (GET_CODE (op) == COND_EXEC)
14564 op = COND_EXEC_CODE (op);
14565 if (GET_CODE (op) == PARALLEL)
14566 op = XVECEXP (op, 0, 0);
14567 op = XEXP (op, 1);
14569 early_op = XEXP (op, 0);
14570 /* This is either an actual independent shift, or a shift applied to
14571 the first operand of another operation. We want the whole shift
14572 operation. */
14573 if (GET_CODE (early_op) == REG)
14574 early_op = op;
14576 return !reg_overlap_mentioned_p (value, early_op);
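/* Example: if PRODUCER sets r1, then for the consumer
   "add r0, r1, r2, lsl #4" the operand feeding the shifter is r2, so
   we return nonzero; for "add r0, r2, r1, lsl #4" r1 must be ready
   at the early shift stage, so we return zero (assuming the usual
   RTL canonicalization that puts the shift first in the PLUS).  */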
14579 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14580 have an early register shift value dependency on the result of
14581 PRODUCER. */
14583 int
14584 arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
14586 rtx value = PATTERN (producer);
14587 rtx op = PATTERN (consumer);
14588 rtx early_op;
14590 if (GET_CODE (value) == COND_EXEC)
14591 value = COND_EXEC_CODE (value);
14592 if (GET_CODE (value) == PARALLEL)
14593 value = XVECEXP (value, 0, 0);
14594 value = XEXP (value, 0);
14595 if (GET_CODE (op) == COND_EXEC)
14596 op = COND_EXEC_CODE (op);
14597 if (GET_CODE (op) == PARALLEL)
14598 op = XVECEXP (op, 0, 0);
14599 op = XEXP (op, 1);
14601 early_op = XEXP (op, 0);
14603 /* This is either an actual independent shift, or a shift applied to
14604 the first operand of another operation. We want the value being
14605 shifted, in either case. */
14606 if (GET_CODE (early_op) != REG)
14607 early_op = XEXP (early_op, 0);
14609 return !reg_overlap_mentioned_p (value, early_op);
14612 /* Return nonzero if the CONSUMER (a mul or mac op) does not
14613 have an early register mult dependency on the result of
14614 PRODUCER. */
14616 int
14617 arm_no_early_mul_dep (rtx producer, rtx consumer)
14619 rtx value = PATTERN (producer);
14620 rtx op = PATTERN (consumer);
14622 if (GET_CODE (value) == COND_EXEC)
14623 value = COND_EXEC_CODE (value);
14624 if (GET_CODE (value) == PARALLEL)
14625 value = XVECEXP (value, 0, 0);
14626 value = XEXP (value, 0);
14627 if (GET_CODE (op) == COND_EXEC)
14628 op = COND_EXEC_CODE (op);
14629 if (GET_CODE (op) == PARALLEL)
14630 op = XVECEXP (op, 0, 0);
14631 op = XEXP (op, 1);
14633 return (GET_CODE (op) == PLUS
14634 && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
14638 /* We can't rely on the caller doing the proper promotion when
14639 using APCS or ATPCS. */
14641 static bool
14642 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
14644 return !TARGET_AAPCS_BASED;
14648 /* AAPCS based ABIs use short enums by default. */
14650 static bool
14651 arm_default_short_enums (void)
14653 return TARGET_AAPCS_BASED;
14657 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14659 static bool
14660 arm_align_anon_bitfield (void)
14662 return TARGET_AAPCS_BASED;
14666 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14668 static tree
14669 arm_cxx_guard_type (void)
14671 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
14675 /* The EABI says to test the least significant bit of a guard variable. */
14677 static bool
14678 arm_cxx_guard_mask_bit (void)
14680 return TARGET_AAPCS_BASED;
14684 /* The EABI specifies that all array cookies are 8 bytes long. */
14686 static tree
14687 arm_get_cookie_size (tree type)
14689 tree size;
14691 if (!TARGET_AAPCS_BASED)
14692 return default_cxx_get_cookie_size (type);
14694 size = build_int_cst (sizetype, 8, 0);
14695 return size;
14699 /* The EABI says that array cookies should also contain the element size. */
14701 static bool
14702 arm_cookie_has_size (void)
14704 return TARGET_AAPCS_BASED;
14708 /* The EABI says constructors and destructors should return a pointer to
14709 the object constructed/destroyed. */
14711 static bool
14712 arm_cxx_cdtor_returns_this (void)
14714 return TARGET_AAPCS_BASED;
14718 void
14719 arm_set_return_address (rtx source, rtx scratch)
14721 arm_stack_offsets *offsets;
14722 HOST_WIDE_INT delta;
14723 rtx addr;
14724 unsigned long saved_regs;
14726 saved_regs = arm_compute_save_reg_mask ();
14728 if ((saved_regs & (1 << LR_REGNUM)) == 0)
14729 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
14730 else
14732 if (frame_pointer_needed)
14733 addr = plus_constant (hard_frame_pointer_rtx, -4);
14734 else
14736 /* LR will be the first saved register. */
14737 offsets = arm_get_frame_offsets ();
14738 delta = offsets->outgoing_args - (offsets->frame + 4);
14741 if (delta >= 4096)
14743 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
14744 GEN_INT (delta & ~4095)));
14745 addr = scratch;
14746 delta &= 4095;
14748 else
14749 addr = stack_pointer_rtx;
14751 addr = plus_constant (addr, delta);
14753 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
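/* Worked example: if LR was saved 5000 bytes above the stack
   pointer, that exceeds the +/-4095 offset range of ARM load/store
   addressing, so we compute scratch = sp + 4096 and store SOURCE at
   [scratch, #904] (4096 + 904 == 5000).  */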
14758 void
14759 thumb_set_return_address (rtx source, rtx scratch)
14761 arm_stack_offsets *offsets;
14762 HOST_WIDE_INT delta;
14763 int reg;
14764 rtx addr;
14765 unsigned long mask;
14767 emit_insn (gen_rtx_USE (VOIDmode, source));
14769 mask = thumb_compute_save_reg_mask ();
14770 if (mask & (1 << LR_REGNUM))
14772 offsets = arm_get_frame_offsets ();
14774 /* Find the saved regs. */
14775 if (frame_pointer_needed)
14777 delta = offsets->soft_frame - offsets->saved_args;
14778 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
14780 else
14782 delta = offsets->outgoing_args - offsets->saved_args;
14783 reg = SP_REGNUM;
14785 /* Allow for the stack frame. */
14786 if (TARGET_BACKTRACE)
14787 delta -= 16;
14788 /* The link register is always the first saved register. */
14789 delta -= 4;
14791 /* Construct the address. */
14792 addr = gen_rtx_REG (SImode, reg);
14793 if ((reg != SP_REGNUM && delta >= 128)
14794 || delta >= 1024)
14796 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
14797 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
14798 addr = scratch;
14800 else
14801 addr = plus_constant (addr, delta);
14803 emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
14805 else
14806 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);