* config/arm/arm.c (TARGET_SETUP_INCOMING_VARARGS): New.
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 59 Temple Place - Suite 330,
23 Boston, MA 02111-1307, USA. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "obstack.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "toplev.h"
45 #include "recog.h"
46 #include "ggc.h"
47 #include "except.h"
48 #include "c-pragma.h"
49 #include "integrate.h"
50 #include "tm_p.h"
51 #include "target.h"
52 #include "target-def.h"
53 #include "debug.h"
55 /* Forward definitions of types. */
56 typedef struct minipool_node Mnode;
57 typedef struct minipool_fixup Mfix;
59 const struct attribute_spec arm_attribute_table[];
61 /* Forward function declarations. */
62 static void arm_add_gc_roots (void);
63 static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
64 rtx, rtx, int, int);
65 static unsigned bit_count (unsigned long);
66 static int arm_address_register_rtx_p (rtx, int);
67 static int arm_legitimate_index_p (enum machine_mode, rtx, int);
68 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
69 inline static int thumb_index_register_rtx_p (rtx, int);
70 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
71 static rtx emit_multi_reg_push (int);
72 static rtx emit_sfm (int, int);
73 #ifndef AOF_ASSEMBLER
74 static bool arm_assemble_integer (rtx, unsigned int, int);
75 #endif
76 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
77 static arm_cc get_arm_condition_code (rtx);
78 static void init_fpa_table (void);
79 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
80 static rtx is_jump_table (rtx);
81 static const char *output_multi_immediate (rtx *, const char *, const char *,
82 int, HOST_WIDE_INT);
83 static void print_multi_reg (FILE *, const char *, int, int);
84 static const char *shift_op (rtx, HOST_WIDE_INT *);
85 static struct machine_function *arm_init_machine_status (void);
86 static int number_of_first_bit_set (int);
87 static void replace_symbols_in_block (tree, rtx, rtx);
88 static void thumb_exit (FILE *, int, rtx);
89 static void thumb_pushpop (FILE *, int, int, int *, int);
91 static HOST_WIDE_INT get_jump_table_size (rtx);
92 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
93 static Mnode *add_minipool_forward_ref (Mfix *);
94 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
95 static Mnode *add_minipool_backward_ref (Mfix *);
96 static void assign_minipool_offsets (Mfix *);
97 static void arm_print_value (FILE *, rtx);
98 static void dump_minipool (rtx);
99 static int arm_barrier_cost (rtx);
100 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
101 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
102 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
103 rtx);
104 static void arm_reorg (void);
105 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
106 static int current_file_function_operand (rtx);
107 static unsigned long arm_compute_save_reg0_reg12_mask (void);
108 static unsigned long arm_compute_save_reg_mask (void);
109 static unsigned long arm_isr_value (tree);
110 static unsigned long arm_compute_func_type (void);
111 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
112 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
113 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
114 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
115 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
116 static int arm_comp_type_attributes (tree, tree);
117 static void arm_set_default_type_attributes (tree);
118 static int arm_adjust_cost (rtx, rtx, rtx, int);
119 static int arm_use_dfa_pipeline_interface (void);
120 static int count_insns_for_constant (HOST_WIDE_INT, int);
121 static int arm_get_strip_length (int);
122 static bool arm_function_ok_for_sibcall (tree, tree);
123 static void arm_internal_label (FILE *, const char *, unsigned long);
124 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
125 tree);
126 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
127 static bool arm_rtx_costs (rtx, int, int, int *);
128 static int arm_address_cost (rtx);
129 static bool arm_memory_load_p (rtx);
130 static bool arm_cirrus_insn_p (rtx);
131 static void cirrus_reorg (rtx);
132 static void arm_init_builtins (void);
133 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
134 static void arm_init_iwmmxt_builtins (void);
135 static rtx safe_vector_operand (rtx, enum machine_mode);
136 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
137 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
140 #ifdef OBJECT_FORMAT_ELF
141 static void arm_elf_asm_named_section (const char *, unsigned int);
142 #endif
143 #ifndef ARM_PE
144 static void arm_encode_section_info (tree, rtx, int);
145 #endif
146 #ifdef AOF_ASSEMBLER
147 static void aof_globalize_label (FILE *, const char *);
148 static void aof_dump_imports (FILE *);
149 static void aof_dump_pic_table (FILE *);
150 static void aof_file_start (void);
151 static void aof_file_end (void);
152 #endif
153 static rtx arm_struct_value_rtx (tree, int);
154 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
155 tree, int *, int);
158 /* Initialize the GCC target structure. */
159 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
160 #undef TARGET_MERGE_DECL_ATTRIBUTES
161 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
162 #endif
164 #undef TARGET_ATTRIBUTE_TABLE
165 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
167 #ifdef AOF_ASSEMBLER
168 #undef TARGET_ASM_BYTE_OP
169 #define TARGET_ASM_BYTE_OP "\tDCB\t"
170 #undef TARGET_ASM_ALIGNED_HI_OP
171 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
172 #undef TARGET_ASM_ALIGNED_SI_OP
173 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
174 #undef TARGET_ASM_GLOBALIZE_LABEL
175 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
176 #undef TARGET_ASM_FILE_START
177 #define TARGET_ASM_FILE_START aof_file_start
178 #undef TARGET_ASM_FILE_END
179 #define TARGET_ASM_FILE_END aof_file_end
180 #else
181 #undef TARGET_ASM_ALIGNED_SI_OP
182 #define TARGET_ASM_ALIGNED_SI_OP NULL
183 #undef TARGET_ASM_INTEGER
184 #define TARGET_ASM_INTEGER arm_assemble_integer
185 #endif
187 #undef TARGET_ASM_FUNCTION_PROLOGUE
188 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
190 #undef TARGET_ASM_FUNCTION_EPILOGUE
191 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
193 #undef TARGET_COMP_TYPE_ATTRIBUTES
194 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
196 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
197 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
199 #undef TARGET_SCHED_ADJUST_COST
200 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
202 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
203 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface
205 #undef TARGET_ENCODE_SECTION_INFO
206 #ifdef ARM_PE
207 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
208 #else
209 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
210 #endif
212 #undef TARGET_STRIP_NAME_ENCODING
213 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
215 #undef TARGET_ASM_INTERNAL_LABEL
216 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
218 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
219 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
221 #undef TARGET_ASM_OUTPUT_MI_THUNK
222 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
223 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
224 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
226 #undef TARGET_RTX_COSTS
227 #define TARGET_RTX_COSTS arm_rtx_costs
228 #undef TARGET_ADDRESS_COST
229 #define TARGET_ADDRESS_COST arm_address_cost
231 #undef TARGET_MACHINE_DEPENDENT_REORG
232 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
234 #undef TARGET_INIT_BUILTINS
235 #define TARGET_INIT_BUILTINS arm_init_builtins
236 #undef TARGET_EXPAND_BUILTIN
237 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
239 #undef TARGET_PROMOTE_FUNCTION_ARGS
240 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
241 #undef TARGET_PROMOTE_PROTOTYPES
242 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
244 #undef TARGET_STRUCT_VALUE_RTX
245 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
247 #undef TARGET_SETUP_INCOMING_VARARGS
248 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
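/* Any hook not overridden above receives its default implementation
   when TARGET_INITIALIZER (from target-def.h) expands into the
   initializer for the targetm vector below.  */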
250 struct gcc_target targetm = TARGET_INITIALIZER;
252 /* Obstack for minipool constant handling. */
253 static struct obstack minipool_obstack;
254 static char * minipool_startobj;
256 /* The maximum number of insns skipped which
257 will be conditionalised if possible. */
258 static int max_insns_skipped = 5;
260 extern FILE * asm_out_file;
262 /* True if we are currently building a constant table. */
263 int making_const_table;
265 /* Define the information needed to generate branch insns. This is
266 stored from the compare operation. */
267 rtx arm_compare_op0, arm_compare_op1;
269 /* What type of floating point are we tuning for? */
270 enum fputype arm_fpu_tune;
272 /* What type of floating point instructions are available? */
273 enum fputype arm_fpu_arch;
275 /* What program mode is the cpu running in? 26-bit mode or 32-bit mode. */
276 enum prog_mode_type arm_prgmode;
278 /* Set by the -mfp=... option. */
279 const char * target_fp_name = NULL;
281 /* Used to parse -mstructure_size_boundary command line option. */
282 const char * structure_size_string = NULL;
283 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
285 /* Bit values used to identify processor capabilities. */
286 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
287 #define FL_FAST_MULT (1 << 1) /* Fast multiply */
288 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
289 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
290 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
291 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
292 #define FL_THUMB (1 << 6) /* Thumb aware */
293 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
294 #define FL_STRONG (1 << 8) /* StrongARM */
295 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
296 #define FL_XSCALE (1 << 10) /* XScale */
297 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
298 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
299 #define FL_ARCH6J (1 << 12) /* Architecture rel 6. Adds
300 media instructions. */
301 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
303 /* The bits in this mask specify which
304 instructions we are allowed to generate. */
305 static unsigned long insn_flags = 0;
307 /* The bits in this mask specify which instruction scheduling options should
 308 be used.  Note - there is an overlap with FL_FAST_MULT.  For some
309 hardware we want to be able to generate the multiply instructions, but to
310 tune as if they were not present in the architecture. */
311 static unsigned long tune_flags = 0;
313 /* The following are used in the arm.md file as equivalents to bits
314 in the above two flag variables. */
316 /* Nonzero if this is an "M" variant of the processor. */
317 int arm_fast_multiply = 0;
319 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
320 int arm_arch4 = 0;
322 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
323 int arm_arch5 = 0;
325 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
326 int arm_arch5e = 0;
328 /* Nonzero if this chip can benefit from load scheduling. */
329 int arm_ld_sched = 0;
331 /* Nonzero if this chip is a StrongARM. */
332 int arm_is_strong = 0;
334 /* Nonzero if this chip supports Intel Wireless MMX technology. */
335 int arm_arch_iwmmxt = 0;
337 /* Nonzero if this chip is an XScale. */
338 int arm_arch_xscale = 0;
 340 /* Nonzero if tuning for XScale.  */
341 int arm_tune_xscale = 0;
343 /* Nonzero if this chip is an ARM6 or an ARM7. */
344 int arm_is_6_or_7 = 0;
346 /* Nonzero if this chip is a Cirrus/DSP. */
347 int arm_is_cirrus = 0;
349 /* Nonzero if generating Thumb instructions. */
350 int thumb_code = 0;
352 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
353 must report the mode of the memory reference from PRINT_OPERAND to
354 PRINT_OPERAND_ADDRESS. */
355 enum machine_mode output_memory_reference_mode;
357 /* The register number to be used for the PIC offset register. */
358 const char * arm_pic_register_string = NULL;
359 int arm_pic_register = INVALID_REGNUM;
 361 /* Set to 1 when a return insn is output; this means that the epilogue
362 is not needed. */
363 int return_used_this_function;
 365 /* Set to 1 after arm_reorg has started.  Reset at the start of
 366 the next function. */
367 static int after_arm_reorg = 0;
369 /* The maximum number of insns to be used when loading a constant. */
370 static int arm_constant_limit = 3;
372 /* For an explanation of these variables, see final_prescan_insn below. */
373 int arm_ccfsm_state;
374 enum arm_cond_code arm_current_cc;
375 rtx arm_target_insn;
376 int arm_target_label;
378 /* The condition codes of the ARM, and the inverse function. */
379 static const char * const arm_condition_codes[] =
381 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
382 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
385 #define streq(string1, string2) (strcmp (string1, string2) == 0)
387 /* Initialization code. */
389 struct processors
391 const char *const name;
392 const unsigned long flags;
395 /* Not all of these give usefully different compilation alternatives,
396 but there is no simple way of generalizing them. */
397 static const struct processors all_cores[] =
399 /* ARM Cores */
401 {"arm2", FL_CO_PROC | FL_MODE26 },
402 {"arm250", FL_CO_PROC | FL_MODE26 },
403 {"arm3", FL_CO_PROC | FL_MODE26 },
404 {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
405 {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
406 {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
407 {"arm610", FL_MODE26 | FL_MODE32 },
408 {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
409 {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
 410 /* arm7m doesn't exist on its own, only with D (and I), but since
 411 those don't alter the code, arm7m is sometimes used. */
412 {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
413 {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
414 {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
415 {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
416 {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
417 {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
418 {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
419 {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
420 {"arm710", FL_MODE26 | FL_MODE32 },
421 {"arm720", FL_MODE26 | FL_MODE32 },
422 {"arm710c", FL_MODE26 | FL_MODE32 },
423 {"arm7100", FL_MODE26 | FL_MODE32 },
424 {"arm7500", FL_MODE26 | FL_MODE32 },
425 /* Doesn't have an external co-proc, but does have embedded fpa. */
426 {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
427 /* V4 Architecture Processors */
428 {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
429 {"arm710t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
430 {"arm720t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
431 {"arm740t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
432 {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
433 {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
434 {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
435 {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
436 {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
437 {"arm940t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
438 {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
439 {"arm9e", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
440 {"ep9312", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
441 {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
442 {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
443 {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
444 {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
445 /* V5 Architecture Processors */
446 {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
447 {"arm1020t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
448 {"arm926ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
449 {"arm1026ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
450 {"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },
451 {"iwmmxt", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
452 /* V6 Architecture Processors */
453 {"arm1136js", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
454 {"arm1136jfs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J | FL_VFPV2 },
455 {NULL, 0}
458 static const struct processors all_architectures[] =
460 /* ARM Architectures */
462 { "armv2", FL_CO_PROC | FL_MODE26 },
463 { "armv2a", FL_CO_PROC | FL_MODE26 },
464 { "armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
465 { "armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
466 { "armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
467 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
468 implementations that support it, so we will leave it out for now. */
469 { "armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
470 { "armv5", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
471 { "armv5t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
472 { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
473 { "armv6j", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
474 { "ep9312", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
475 {"iwmmxt", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
476 { NULL, 0 }
479 /* This is a magic structure. The 'string' field is magically filled in
480 with a pointer to the value specified by the user on the command line
481 assuming that the user has specified such a value. */
483 struct arm_cpu_select arm_select[] =
485 /* string name processors */
486 { NULL, "-mcpu=", all_cores },
487 { NULL, "-march=", all_architectures },
488 { NULL, "-mtune=", all_cores }
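/* For instance, a command line containing -mcpu=arm710 -mtune=strongarm
   leaves arm_select[0].string pointing at "arm710" and
   arm_select[2].string pointing at "strongarm"; arm_override_options
   below then takes insn_flags from the arm710 entry of all_cores and
   tune_flags from the strongarm entry.  */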
491 /* Return the number of bits set in VALUE. */
492 static unsigned
493 bit_count (unsigned long value)
495 unsigned long count = 0;
497 while (value)
499 count++;
500 value &= value - 1; /* Clear the least-significant set bit. */
503 return count;
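/* The loop above clears exactly one set bit per iteration, so it runs
   once per bit: bit_count (0x90000009), for example, is 4.  */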
506 /* Fix up any incompatible options that the user has specified.
507 This has now turned into a maze. */
508 void
509 arm_override_options (void)
511 unsigned i;
513 /* Set up the flags based on the cpu/architecture selected by the user. */
514 for (i = ARRAY_SIZE (arm_select); i--;)
516 struct arm_cpu_select * ptr = arm_select + i;
518 if (ptr->string != NULL && ptr->string[0] != '\0')
520 const struct processors * sel;
522 for (sel = ptr->processors; sel->name != NULL; sel++)
523 if (streq (ptr->string, sel->name))
525 if (i == 2)
526 tune_flags = sel->flags;
527 else
529 /* If we have been given an architecture and a processor
530 make sure that they are compatible. We only generate
531 a warning though, and we prefer the CPU over the
532 architecture. */
533 if (insn_flags != 0 && (insn_flags ^ sel->flags))
534 warning ("switch -mcpu=%s conflicts with -march= switch",
535 ptr->string);
537 insn_flags = sel->flags;
540 break;
543 if (sel->name == NULL)
544 error ("bad value (%s) for %s switch", ptr->string, ptr->name);
548 /* If the user did not specify a processor, choose one for them. */
549 if (insn_flags == 0)
551 const struct processors * sel;
552 unsigned int sought;
553 static const struct cpu_default
555 const int cpu;
556 const char *const name;
558 cpu_defaults[] =
560 { TARGET_CPU_arm2, "arm2" },
561 { TARGET_CPU_arm6, "arm6" },
562 { TARGET_CPU_arm610, "arm610" },
563 { TARGET_CPU_arm710, "arm710" },
564 { TARGET_CPU_arm7m, "arm7m" },
565 { TARGET_CPU_arm7500fe, "arm7500fe" },
566 { TARGET_CPU_arm7tdmi, "arm7tdmi" },
567 { TARGET_CPU_arm8, "arm8" },
568 { TARGET_CPU_arm810, "arm810" },
569 { TARGET_CPU_arm9, "arm9" },
570 { TARGET_CPU_strongarm, "strongarm" },
571 { TARGET_CPU_xscale, "xscale" },
572 { TARGET_CPU_ep9312, "ep9312" },
573 { TARGET_CPU_iwmmxt, "iwmmxt" },
574 { TARGET_CPU_arm926ej_s, "arm926ej-s" },
575 { TARGET_CPU_arm1026ej_s, "arm1026ej-s" },
576 { TARGET_CPU_arm1136j_s, "arm1136j_s" },
577 { TARGET_CPU_arm1136jf_s, "arm1136jf_s" },
578 { TARGET_CPU_generic, "arm" },
579 { 0, 0 }
581 const struct cpu_default * def;
583 /* Find the default. */
584 for (def = cpu_defaults; def->name; def++)
585 if (def->cpu == TARGET_CPU_DEFAULT)
586 break;
588 /* Make sure we found the default CPU. */
589 if (def->name == NULL)
590 abort ();
592 /* Find the default CPU's flags. */
593 for (sel = all_cores; sel->name != NULL; sel++)
594 if (streq (def->name, sel->name))
595 break;
597 if (sel->name == NULL)
598 abort ();
600 insn_flags = sel->flags;
 602 /* Now check to see if the user has specified any command line
 603 switches that require certain abilities from the cpu. */
604 sought = 0;
606 if (TARGET_INTERWORK || TARGET_THUMB)
608 sought |= (FL_THUMB | FL_MODE32);
610 /* Force apcs-32 to be used for interworking. */
611 target_flags |= ARM_FLAG_APCS_32;
613 /* There are no ARM processors that support both APCS-26 and
614 interworking. Therefore we force FL_MODE26 to be removed
615 from insn_flags here (if it was set), so that the search
616 below will always be able to find a compatible processor. */
617 insn_flags &= ~FL_MODE26;
619 else if (!TARGET_APCS_32)
620 sought |= FL_MODE26;
622 if (sought != 0 && ((sought & insn_flags) != sought))
624 /* Try to locate a CPU type that supports all of the abilities
625 of the default CPU, plus the extra abilities requested by
626 the user. */
627 for (sel = all_cores; sel->name != NULL; sel++)
628 if ((sel->flags & sought) == (sought | insn_flags))
629 break;
631 if (sel->name == NULL)
633 unsigned current_bit_count = 0;
634 const struct processors * best_fit = NULL;
636 /* Ideally we would like to issue an error message here
637 saying that it was not possible to find a CPU compatible
638 with the default CPU, but which also supports the command
639 line options specified by the programmer, and so they
640 ought to use the -mcpu=<name> command line option to
641 override the default CPU type.
 643 Unfortunately this does not work with multilibbing.  We
 644 need to be able to support multilibs for -mapcs-26 and for
 645 -mthumb-interwork, and there is no CPU that can support both
 646 options.  Instead, if we cannot find a cpu that has both the
 647 characteristics of the default cpu and the given command line
 648 options, we scan the array again looking for a best match. */
649 for (sel = all_cores; sel->name != NULL; sel++)
650 if ((sel->flags & sought) == sought)
652 unsigned count;
654 count = bit_count (sel->flags & insn_flags);
656 if (count >= current_bit_count)
658 best_fit = sel;
659 current_bit_count = count;
663 if (best_fit == NULL)
664 abort ();
665 else
666 sel = best_fit;
669 insn_flags = sel->flags;
673 /* If tuning has not been specified, tune for whichever processor or
674 architecture has been selected. */
675 if (tune_flags == 0)
676 tune_flags = insn_flags;
678 /* Make sure that the processor choice does not conflict with any of the
679 other command line choices. */
680 if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
682 /* If APCS-32 was not the default then it must have been set by the
683 user, so issue a warning message. If the user has specified
684 "-mapcs-32 -mcpu=arm2" then we loose here. */
685 if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
 686 warning ("target CPU does not support APCS-32");
687 target_flags &= ~ARM_FLAG_APCS_32;
689 else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
 691 warning ("target CPU does not support APCS-26");
692 target_flags |= ARM_FLAG_APCS_32;
695 if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
 697 warning ("target CPU does not support interworking");
698 target_flags &= ~ARM_FLAG_INTERWORK;
701 if (TARGET_THUMB && !(insn_flags & FL_THUMB))
703 warning ("target CPU does not support THUMB instructions");
704 target_flags &= ~ARM_FLAG_THUMB;
707 if (TARGET_APCS_FRAME && TARGET_THUMB)
709 /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
710 target_flags &= ~ARM_FLAG_APCS_FRAME;
713 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
714 from here where no function is being compiled currently. */
715 if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
716 && TARGET_ARM)
717 warning ("enabling backtrace support is only meaningful when compiling for the Thumb");
719 if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
720 warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");
722 if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
723 warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");
725 /* If interworking is enabled then APCS-32 must be selected as well. */
726 if (TARGET_INTERWORK)
728 if (!TARGET_APCS_32)
 729 warning ("interworking forces APCS-32 to be used");
730 target_flags |= ARM_FLAG_APCS_32;
733 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
735 warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
736 target_flags |= ARM_FLAG_APCS_FRAME;
739 if (TARGET_POKE_FUNCTION_NAME)
740 target_flags |= ARM_FLAG_APCS_FRAME;
742 if (TARGET_APCS_REENT && flag_pic)
743 error ("-fpic and -mapcs-reent are incompatible");
745 if (TARGET_APCS_REENT)
746 warning ("APCS reentrant code not supported. Ignored");
748 /* If this target is normally configured to use APCS frames, warn if they
749 are turned off and debugging is turned on. */
750 if (TARGET_ARM
751 && write_symbols != NO_DEBUG
752 && !TARGET_APCS_FRAME
753 && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
754 warning ("-g with -mno-apcs-frame may not give sensible debugging");
756 /* If stack checking is disabled, we can use r10 as the PIC register,
757 which keeps r9 available. */
758 if (flag_pic)
759 arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
761 if (TARGET_APCS_FLOAT)
762 warning ("passing floating point arguments in fp regs not yet supported");
764 /* Initialize boolean versions of the flags, for use in the arm.md file. */
765 arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
766 arm_arch4 = (insn_flags & FL_ARCH4) != 0;
767 arm_arch5 = (insn_flags & FL_ARCH5) != 0;
768 arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
769 arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
771 arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
772 arm_is_strong = (tune_flags & FL_STRONG) != 0;
773 thumb_code = (TARGET_ARM == 0);
774 arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
775 && !(tune_flags & FL_ARCH4))) != 0;
776 arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
777 arm_is_cirrus = (tune_flags & FL_CIRRUS) != 0;
778 arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
780 if (TARGET_IWMMXT && (! TARGET_ATPCS))
781 target_flags |= ARM_FLAG_ATPCS;
783 if (arm_is_cirrus)
785 arm_fpu_tune = FPUTYPE_MAVERICK;
787 /* Ignore -mhard-float if -mcpu=ep9312. */
788 if (TARGET_HARD_FLOAT)
789 target_flags ^= ARM_FLAG_SOFT_FLOAT;
791 else
792 /* Default value for floating point code... if no co-processor
793 bus, then schedule for emulated floating point. Otherwise,
794 assume the user has an FPA.
795 Note: this does not prevent use of floating point instructions,
796 -msoft-float does that. */
797 arm_fpu_tune = (tune_flags & FL_CO_PROC) ? FPUTYPE_FPA : FPUTYPE_FPA_EMU3;
799 if (target_fp_name)
801 if (streq (target_fp_name, "2"))
802 arm_fpu_arch = FPUTYPE_FPA_EMU2;
803 else if (streq (target_fp_name, "3"))
804 arm_fpu_arch = FPUTYPE_FPA_EMU3;
805 else
806 error ("invalid floating point emulation option: -mfpe-%s",
807 target_fp_name);
809 else
810 arm_fpu_arch = FPUTYPE_DEFAULT;
812 if (TARGET_FPE)
814 if (arm_fpu_tune == FPUTYPE_FPA_EMU3)
815 arm_fpu_tune = FPUTYPE_FPA_EMU2;
816 else if (arm_fpu_tune == FPUTYPE_MAVERICK)
817 warning ("-mfpe switch not supported by ep9312 target cpu - ignored.");
818 else if (arm_fpu_tune != FPUTYPE_FPA)
819 arm_fpu_tune = FPUTYPE_FPA_EMU2;
822 /* For arm2/3 there is no need to do any scheduling if there is only
823 a floating point emulator, or we are doing software floating-point. */
824 if ((TARGET_SOFT_FLOAT || arm_fpu_tune != FPUTYPE_FPA)
825 && (tune_flags & FL_MODE32) == 0)
826 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
828 arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
830 if (structure_size_string != NULL)
832 int size = strtol (structure_size_string, NULL, 0);
834 if (size == 8 || size == 32)
835 arm_structure_size_boundary = size;
836 else
837 warning ("structure size boundary can only be set to 8 or 32");
840 if (arm_pic_register_string != NULL)
842 int pic_register = decode_reg_name (arm_pic_register_string);
844 if (!flag_pic)
845 warning ("-mpic-register= is useless without -fpic");
847 /* Prevent the user from choosing an obviously stupid PIC register. */
848 else if (pic_register < 0 || call_used_regs[pic_register]
849 || pic_register == HARD_FRAME_POINTER_REGNUM
850 || pic_register == STACK_POINTER_REGNUM
851 || pic_register >= PC_REGNUM)
852 error ("unable to use '%s' for PIC register", arm_pic_register_string);
853 else
854 arm_pic_register = pic_register;
857 if (TARGET_THUMB && flag_schedule_insns)
859 /* Don't warn since it's on by default in -O2. */
860 flag_schedule_insns = 0;
863 if (optimize_size)
865 /* There's some dispute as to whether this should be 1 or 2. However,
866 experiments seem to show that in pathological cases a setting of
867 1 degrades less severely than a setting of 2. This could change if
868 other parts of the compiler change their behavior. */
869 arm_constant_limit = 1;
871 /* If optimizing for size, bump the number of instructions that we
872 are prepared to conditionally execute (even on a StrongARM). */
873 max_insns_skipped = 6;
875 else
877 /* For processors with load scheduling, it never costs more than
878 2 cycles to load a constant, and the load scheduler may well
879 reduce that to 1. */
880 if (tune_flags & FL_LDSCHED)
881 arm_constant_limit = 1;
883 /* On XScale the longer latency of a load makes it more difficult
884 to achieve a good schedule, so it's faster to synthesize
885 constants that can be done in two insns. */
886 if (arm_tune_xscale)
887 arm_constant_limit = 2;
889 /* StrongARM has early execution of branches, so a sequence
890 that is worth skipping is shorter. */
891 if (arm_is_strong)
892 max_insns_skipped = 3;
895 /* Register global variables with the garbage collector. */
896 arm_add_gc_roots ();
899 static void
900 arm_add_gc_roots (void)
902 gcc_obstack_init(&minipool_obstack);
903 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
906 /* A table of known ARM exception types.
907 For use with the interrupt function attribute. */
909 typedef struct
911 const char *const arg;
912 const unsigned long return_value;
914 isr_attribute_arg;
916 static const isr_attribute_arg isr_attribute_args [] =
918 { "IRQ", ARM_FT_ISR },
919 { "irq", ARM_FT_ISR },
920 { "FIQ", ARM_FT_FIQ },
921 { "fiq", ARM_FT_FIQ },
922 { "ABORT", ARM_FT_ISR },
923 { "abort", ARM_FT_ISR },
924 { "ABORT", ARM_FT_ISR },
925 { "abort", ARM_FT_ISR },
926 { "UNDEF", ARM_FT_EXCEPTION },
927 { "undef", ARM_FT_EXCEPTION },
928 { "SWI", ARM_FT_EXCEPTION },
929 { "swi", ARM_FT_EXCEPTION },
930 { NULL, ARM_FT_NORMAL }
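/* These are the argument strings accepted by the interrupt attribute,
   as in:  void handler (void) __attribute__ ((interrupt ("IRQ")));  */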
933 /* Returns the (interrupt) function type of the current
934 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
936 static unsigned long
937 arm_isr_value (tree argument)
939 const isr_attribute_arg * ptr;
940 const char * arg;
942 /* No argument - default to IRQ. */
943 if (argument == NULL_TREE)
944 return ARM_FT_ISR;
946 /* Get the value of the argument. */
947 if (TREE_VALUE (argument) == NULL_TREE
948 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
949 return ARM_FT_UNKNOWN;
951 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
953 /* Check it against the list of known arguments. */
954 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
955 if (streq (arg, ptr->arg))
956 return ptr->return_value;
958 /* An unrecognized interrupt type. */
959 return ARM_FT_UNKNOWN;
962 /* Computes the type of the current function. */
964 static unsigned long
965 arm_compute_func_type (void)
967 unsigned long type = ARM_FT_UNKNOWN;
968 tree a;
969 tree attr;
971 if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
972 abort ();
974 /* Decide if the current function is volatile. Such functions
975 never return, and many memory cycles can be saved by not storing
976 register values that will never be needed again. This optimization
977 was added to speed up context switching in a kernel application. */
978 if (optimize > 0
979 && current_function_nothrow
980 && TREE_THIS_VOLATILE (current_function_decl))
981 type |= ARM_FT_VOLATILE;
983 if (current_function_needs_context)
984 type |= ARM_FT_NESTED;
986 attr = DECL_ATTRIBUTES (current_function_decl);
988 a = lookup_attribute ("naked", attr);
989 if (a != NULL_TREE)
990 type |= ARM_FT_NAKED;
992 if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
993 type |= ARM_FT_EXCEPTION_HANDLER;
994 else
996 a = lookup_attribute ("isr", attr);
997 if (a == NULL_TREE)
998 a = lookup_attribute ("interrupt", attr);
1000 if (a == NULL_TREE)
1001 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
1002 else
1003 type |= arm_isr_value (TREE_VALUE (a));
1006 return type;
1009 /* Returns the type of the current function. */
1011 unsigned long
1012 arm_current_func_type (void)
1014 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
1015 cfun->machine->func_type = arm_compute_func_type ();
1017 return cfun->machine->func_type;
1020 /* Return 1 if it is possible to return using a single instruction.
1021 If SIBLING is non-null, this is a test for a return before a sibling
1022 call. SIBLING is the call insn, so we can examine its register usage. */
 1024 int
 1025 use_return_insn (int iscond, rtx sibling)
1027 int regno;
1028 unsigned int func_type;
1029 unsigned long saved_int_regs;
1030 unsigned HOST_WIDE_INT stack_adjust;
1032 /* Never use a return instruction before reload has run. */
1033 if (!reload_completed)
1034 return 0;
1036 func_type = arm_current_func_type ();
1038 /* Naked functions and volatile functions need special
1039 consideration. */
1040 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
1041 return 0;
1043 /* So do interrupt functions that use the frame pointer. */
1044 if (IS_INTERRUPT (func_type) && frame_pointer_needed)
1045 return 0;
1047 stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;
1049 /* As do variadic functions. */
1050 if (current_function_pretend_args_size
1051 || cfun->machine->uses_anonymous_args
1052 /* Or if the function calls __builtin_eh_return () */
1053 || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
1054 /* Or if the function calls alloca */
1055 || current_function_calls_alloca
1056 /* Or if there is a stack adjustment. However, if the stack pointer
1057 is saved on the stack, we can use a pre-incrementing stack load. */
1058 || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
1059 return 0;
1061 saved_int_regs = arm_compute_save_reg_mask ();
1063 /* Unfortunately, the insn
1065 ldmib sp, {..., sp, ...}
1067 triggers a bug on most SA-110 based devices, such that the stack
1068 pointer won't be correctly restored if the instruction takes a
1069 page fault. We work around this problem by popping r3 along with
1070 the other registers, since that is never slower than executing
1071 another instruction.
1073 We test for !arm_arch5 here, because code for any architecture
1074 less than this could potentially be run on one of the buggy
1075 chips. */
1076 if (stack_adjust == 4 && !arm_arch5)
1078 /* Validate that r3 is a call-clobbered register (always true in
1079 the default abi) ... */
1080 if (!call_used_regs[3])
1081 return 0;
1083 /* ... that it isn't being used for a return value (always true
1084 until we implement return-in-regs), or for a tail-call
1085 argument ... */
1086 if (sibling)
1088 if (GET_CODE (sibling) != CALL_INSN)
1089 abort ();
1091 if (find_regno_fusage (sibling, USE, 3))
1092 return 0;
1095 /* ... and that there are no call-saved registers in r0-r2
1096 (always true in the default ABI). */
1097 if (saved_int_regs & 0x7)
1098 return 0;
1101 /* Can't be done if interworking with Thumb, and any registers have been
1102 stacked. */
1103 if (TARGET_INTERWORK && saved_int_regs != 0)
1104 return 0;
1106 /* On StrongARM, conditional returns are expensive if they aren't
1107 taken and multiple registers have been stacked. */
1108 if (iscond && arm_is_strong)
1110 /* Conditional return when just the LR is stored is a simple
 1111 conditional-load instruction; that's not expensive. */
1112 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
1113 return 0;
1115 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
1116 return 0;
1119 /* If there are saved registers but the LR isn't saved, then we need
1120 two instructions for the return. */
1121 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
1122 return 0;
1124 /* Can't be done if any of the FPA regs are pushed,
1125 since this also requires an insn. */
1126 if (TARGET_HARD_FLOAT)
1127 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
1128 if (regs_ever_live[regno] && !call_used_regs[regno])
1129 return 0;
1131 if (TARGET_REALLY_IWMMXT)
1132 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
1133 if (regs_ever_live[regno] && ! call_used_regs [regno])
1134 return 0;
1136 return 1;
1139 /* Return TRUE if int I is a valid immediate ARM constant. */
 1141 int
 1142 const_ok_for_arm (HOST_WIDE_INT i)
1144 unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
1146 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1147 be all zero, or all one. */
1148 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
1149 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
1150 != ((~(unsigned HOST_WIDE_INT) 0)
1151 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
1152 return FALSE;
 1154 /* Fast return for 0 and powers of 2.  */
 1155 if ((i & (i - 1)) == 0)
 1156 return TRUE;
 1158 do
 1160 if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
1161 return TRUE;
1162 mask =
1163 (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
1164 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
1166 while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
1168 return FALSE;
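/* An ARM data-processing immediate is an 8-bit value rotated right by
   an even amount within the 32-bit word, so 0xff, 0xff00 and
   0xf000000f (0xff rotated right by 4) are all encodable, whereas
   0xfff and 0x0000ffff are not.  The loop above slides the 8-bit
   window around two bits at a time to test every rotation.  */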
1171 /* Return true if I is a valid constant for the operation CODE. */
1172 static int
1173 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
1175 if (const_ok_for_arm (i))
1176 return 1;
1178 switch (code)
1180 case PLUS:
1181 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
1183 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
1184 case XOR:
1185 case IOR:
1186 return 0;
1188 case AND:
1189 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
1191 default:
1192 abort ();
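/* For instance, PLUS with I == -4 fails const_ok_for_arm directly
   (the value is 0xfffffffc), but the negation 4 is encodable, so the
   addition can be emitted as a subtraction; likewise an AND whose
   inverted constant is encodable can be emitted as a BIC.  */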
1196 /* Emit a sequence of insns to handle a large constant.
 1197 CODE is the code of the operation required; it can be any of SET, PLUS,
1198 IOR, AND, XOR, MINUS;
1199 MODE is the mode in which the operation is being performed;
1200 VAL is the integer to operate on;
1201 SOURCE is the other operand (a register, or a null-pointer for SET);
1202 SUBTARGETS means it is safe to create scratch registers if that will
1203 either produce a simpler sequence, or we will want to cse the values.
1204 Return value is the number of insns emitted. */
 1206 int
 1207 arm_split_constant (enum rtx_code code, enum machine_mode mode,
1208 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
1210 if (subtargets || code == SET
1211 || (GET_CODE (target) == REG && GET_CODE (source) == REG
1212 && REGNO (target) != REGNO (source)))
1214 /* After arm_reorg has been called, we can't fix up expensive
 1215 constants by pushing them into memory, so we must synthesize
1216 them in-line, regardless of the cost. This is only likely to
1217 be more costly on chips that have load delay slots and we are
1218 compiling without running the scheduler (so no splitting
1219 occurred before the final instruction emission).
1221 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1223 if (!after_arm_reorg
1224 && (arm_gen_constant (code, mode, val, target, source, 1, 0)
1225 > arm_constant_limit + (code != SET)))
1227 if (code == SET)
 1229 /* Currently SET is the only monadic value for CODE; all
 1230 the rest are dyadic. */
1231 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
1232 return 1;
1234 else
1236 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
1238 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
 1239 /* For MINUS, the value is what we subtract from, since we never
 1240 have subtraction of a constant. */
1241 if (code == MINUS)
1242 emit_insn (gen_rtx_SET (VOIDmode, target,
1243 gen_rtx_MINUS (mode, temp, source)));
1244 else
1245 emit_insn (gen_rtx_SET (VOIDmode, target,
1246 gen_rtx (code, mode, source, temp)));
1247 return 2;
1252 return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
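/* Callers therefore receive either an inline synthesis of the
   constant or, when that would exceed arm_constant_limit, a bare SET
   of the constant, which arm_reorg can later turn into a PC-relative
   load from a minipool.  */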
1255 static int
1256 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1258 HOST_WIDE_INT temp1;
 1259 int num_insns = 0;
 1260 do
1262 int end;
1264 if (i <= 0)
1265 i += 32;
1266 if (remainder & (3 << (i - 2)))
1268 end = i - 8;
1269 if (end < 0)
1270 end += 32;
1271 temp1 = remainder & ((0x0ff << end)
1272 | ((i < end) ? (0xff >> (32 - end)) : 0));
1273 remainder &= ~temp1;
1274 num_insns++;
1275 i -= 6;
1277 i -= 2;
1278 } while (remainder);
1279 return num_insns;
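/* This mirrors the emission loop at the bottom of arm_gen_constant:
   each iteration peels one 8-bit chunk, aligned on a 2-bit boundary,
   off REMAINDER, so the result is the number of data-processing insns
   that a sequence starting at bit I would need.  */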
1282 /* As above, but extra parameter GENERATE which, if clear, suppresses
1283 RTL generation. */
1285 static int
1286 arm_gen_constant (enum rtx_code code, enum machine_mode mode,
1287 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1288 int generate)
1290 int can_invert = 0;
1291 int can_negate = 0;
1292 int can_negate_initial = 0;
1293 int can_shift = 0;
1294 int i;
1295 int num_bits_set = 0;
1296 int set_sign_bit_copies = 0;
1297 int clear_sign_bit_copies = 0;
1298 int clear_zero_bit_copies = 0;
1299 int set_zero_bit_copies = 0;
1300 int insns = 0;
1301 unsigned HOST_WIDE_INT temp1, temp2;
1302 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1304 /* Find out which operations are safe for a given CODE. Also do a quick
1305 check for degenerate cases; these can occur when DImode operations
1306 are split. */
1307 switch (code)
1309 case SET:
1310 can_invert = 1;
1311 can_shift = 1;
1312 can_negate = 1;
1313 break;
1315 case PLUS:
1316 can_negate = 1;
1317 can_negate_initial = 1;
1318 break;
1320 case IOR:
1321 if (remainder == 0xffffffff)
1323 if (generate)
1324 emit_insn (gen_rtx_SET (VOIDmode, target,
1325 GEN_INT (ARM_SIGN_EXTEND (val))));
1326 return 1;
1328 if (remainder == 0)
1330 if (reload_completed && rtx_equal_p (target, source))
1331 return 0;
1332 if (generate)
1333 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1334 return 1;
1336 break;
1338 case AND:
1339 if (remainder == 0)
1341 if (generate)
1342 emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
1343 return 1;
1345 if (remainder == 0xffffffff)
1347 if (reload_completed && rtx_equal_p (target, source))
1348 return 0;
1349 if (generate)
1350 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1351 return 1;
1353 can_invert = 1;
1354 break;
1356 case XOR:
1357 if (remainder == 0)
1359 if (reload_completed && rtx_equal_p (target, source))
1360 return 0;
1361 if (generate)
1362 emit_insn (gen_rtx_SET (VOIDmode, target, source));
1363 return 1;
1365 if (remainder == 0xffffffff)
1367 if (generate)
1368 emit_insn (gen_rtx_SET (VOIDmode, target,
1369 gen_rtx_NOT (mode, source)));
1370 return 1;
1373 /* We don't know how to handle this yet below. */
1374 abort ();
1376 case MINUS:
1377 /* We treat MINUS as (val - source), since (source - val) is always
1378 passed as (source + (-val)). */
1379 if (remainder == 0)
1381 if (generate)
1382 emit_insn (gen_rtx_SET (VOIDmode, target,
1383 gen_rtx_NEG (mode, source)));
1384 return 1;
1386 if (const_ok_for_arm (val))
1388 if (generate)
1389 emit_insn (gen_rtx_SET (VOIDmode, target,
1390 gen_rtx_MINUS (mode, GEN_INT (val),
1391 source)));
1392 return 1;
1394 can_negate = 1;
1396 break;
1398 default:
1399 abort ();
 1402 /* If we can do it in one insn, get out quickly. */
1403 if (const_ok_for_arm (val)
1404 || (can_negate_initial && const_ok_for_arm (-val))
1405 || (can_invert && const_ok_for_arm (~val)))
1407 if (generate)
1408 emit_insn (gen_rtx_SET (VOIDmode, target,
1409 (source ? gen_rtx (code, mode, source,
1410 GEN_INT (val))
1411 : GEN_INT (val))));
1412 return 1;
1415 /* Calculate a few attributes that may be useful for specific
1416 optimizations. */
1417 for (i = 31; i >= 0; i--)
1419 if ((remainder & (1 << i)) == 0)
1420 clear_sign_bit_copies++;
1421 else
1422 break;
1425 for (i = 31; i >= 0; i--)
1427 if ((remainder & (1 << i)) != 0)
1428 set_sign_bit_copies++;
1429 else
1430 break;
1433 for (i = 0; i <= 31; i++)
1435 if ((remainder & (1 << i)) == 0)
1436 clear_zero_bit_copies++;
1437 else
1438 break;
1441 for (i = 0; i <= 31; i++)
1443 if ((remainder & (1 << i)) != 0)
1444 set_zero_bit_copies++;
1445 else
1446 break;
1449 switch (code)
1451 case SET:
1452 /* See if we can do this by sign_extending a constant that is known
 1453 to be negative. This is a good way of doing it, since the shift
1454 may well merge into a subsequent insn. */
1455 if (set_sign_bit_copies > 1)
1457 if (const_ok_for_arm
1458 (temp1 = ARM_SIGN_EXTEND (remainder
1459 << (set_sign_bit_copies - 1))))
1461 if (generate)
1463 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1464 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1465 GEN_INT (temp1)));
1466 emit_insn (gen_ashrsi3 (target, new_src,
1467 GEN_INT (set_sign_bit_copies - 1)));
1469 return 2;
 1471 /* For an inverted constant, we will need to set the low bits;
1472 these will be shifted out of harm's way. */
1473 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1474 if (const_ok_for_arm (~temp1))
1476 if (generate)
1478 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1479 emit_insn (gen_rtx_SET (VOIDmode, new_src,
1480 GEN_INT (temp1)));
1481 emit_insn (gen_ashrsi3 (target, new_src,
1482 GEN_INT (set_sign_bit_copies - 1)));
1484 return 2;
1488 /* See if we can generate this by setting the bottom (or the top)
1489 16 bits, and then shifting these into the other half of the
 1490 word. We only look for the simplest cases; to do more would cost
1491 too much. Be careful, however, not to generate this when the
1492 alternative would take fewer insns. */
1493 if (val & 0xffff0000)
1495 temp1 = remainder & 0xffff0000;
1496 temp2 = remainder & 0x0000ffff;
1498 /* Overlaps outside this range are best done using other methods. */
1499 for (i = 9; i < 24; i++)
1501 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1502 && !const_ok_for_arm (temp2))
1504 rtx new_src = (subtargets
1505 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1506 : target);
1507 insns = arm_gen_constant (code, mode, temp2, new_src,
1508 source, subtargets, generate);
1509 source = new_src;
1510 if (generate)
1511 emit_insn (gen_rtx_SET
1512 (VOIDmode, target,
1513 gen_rtx_IOR (mode,
1514 gen_rtx_ASHIFT (mode, source,
1515 GEN_INT (i)),
1516 source)));
1517 return insns + 1;
1521 /* Don't duplicate cases already considered. */
1522 for (i = 17; i < 24; i++)
1524 if (((temp1 | (temp1 >> i)) == remainder)
1525 && !const_ok_for_arm (temp1))
1527 rtx new_src = (subtargets
1528 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1529 : target);
1530 insns = arm_gen_constant (code, mode, temp1, new_src,
1531 source, subtargets, generate);
1532 source = new_src;
1533 if (generate)
1534 emit_insn
1535 (gen_rtx_SET (VOIDmode, target,
1536 gen_rtx_IOR
1537 (mode,
1538 gen_rtx_LSHIFTRT (mode, source,
1539 GEN_INT (i)),
1540 source)));
1541 return insns + 1;
1545 break;
1547 case IOR:
1548 case XOR:
1549 /* If we have IOR or XOR, and the constant can be loaded in a
1550 single instruction, and we can find a temporary to put it in,
1551 then this can be done in two instructions instead of 3-4. */
1552 if (subtargets
 1553 /* TARGET can't be NULL if SUBTARGETS is 0.  */
1554 || (reload_completed && !reg_mentioned_p (target, source)))
1556 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1558 if (generate)
1560 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1562 emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
1563 emit_insn (gen_rtx_SET (VOIDmode, target,
1564 gen_rtx (code, mode, source, sub)));
1566 return 2;
1570 if (code == XOR)
1571 break;
1573 if (set_sign_bit_copies > 8
1574 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1576 if (generate)
1578 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1579 rtx shift = GEN_INT (set_sign_bit_copies);
1581 emit_insn (gen_rtx_SET (VOIDmode, sub,
1582 gen_rtx_NOT (mode,
1583 gen_rtx_ASHIFT (mode,
1584 source,
1585 shift))));
1586 emit_insn (gen_rtx_SET (VOIDmode, target,
1587 gen_rtx_NOT (mode,
1588 gen_rtx_LSHIFTRT (mode, sub,
1589 shift))));
1591 return 2;
1594 if (set_zero_bit_copies > 8
1595 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1597 if (generate)
1599 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1600 rtx shift = GEN_INT (set_zero_bit_copies);
1602 emit_insn (gen_rtx_SET (VOIDmode, sub,
1603 gen_rtx_NOT (mode,
1604 gen_rtx_LSHIFTRT (mode,
1605 source,
1606 shift))));
1607 emit_insn (gen_rtx_SET (VOIDmode, target,
1608 gen_rtx_NOT (mode,
1609 gen_rtx_ASHIFT (mode, sub,
1610 shift))));
1612 return 2;
1615 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1617 if (generate)
1619 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1620 emit_insn (gen_rtx_SET (VOIDmode, sub,
1621 gen_rtx_NOT (mode, source)));
1622 source = sub;
1623 if (subtargets)
1624 sub = gen_reg_rtx (mode);
1625 emit_insn (gen_rtx_SET (VOIDmode, sub,
1626 gen_rtx_AND (mode, source,
1627 GEN_INT (temp1))));
1628 emit_insn (gen_rtx_SET (VOIDmode, target,
1629 gen_rtx_NOT (mode, sub)));
1631 return 3;
1633 break;
1635 case AND:
 1636 /* See if two shifts will do 2 or more insns' worth of work. */
1637 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1639 HOST_WIDE_INT shift_mask = ((0xffffffff
1640 << (32 - clear_sign_bit_copies))
1641 & 0xffffffff);
1643 if ((remainder | shift_mask) != 0xffffffff)
1645 if (generate)
1647 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1648 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1649 new_src, source, subtargets, 1);
1650 source = new_src;
1652 else
1654 rtx targ = subtargets ? NULL_RTX : target;
1655 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1656 targ, source, subtargets, 0);
1660 if (generate)
1662 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1663 rtx shift = GEN_INT (clear_sign_bit_copies);
1665 emit_insn (gen_ashlsi3 (new_src, source, shift));
1666 emit_insn (gen_lshrsi3 (target, new_src, shift));
1669 return insns + 2;
1672 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1674 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1676 if ((remainder | shift_mask) != 0xffffffff)
1678 if (generate)
1680 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1682 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1683 new_src, source, subtargets, 1);
1684 source = new_src;
1686 else
1688 rtx targ = subtargets ? NULL_RTX : target;
1690 insns = arm_gen_constant (AND, mode, remainder | shift_mask,
1691 targ, source, subtargets, 0);
1695 if (generate)
1697 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1698 rtx shift = GEN_INT (clear_zero_bit_copies);
1700 emit_insn (gen_lshrsi3 (new_src, source, shift));
1701 emit_insn (gen_ashlsi3 (target, new_src, shift));
1704 return insns + 2;
1707 break;
1709 default:
1710 break;
1713 for (i = 0; i < 32; i++)
1714 if (remainder & (1 << i))
1715 num_bits_set++;
1717 if (code == AND || (can_invert && num_bits_set > 16))
1718 remainder = (~remainder) & 0xffffffff;
1719 else if (code == PLUS && num_bits_set > 16)
1720 remainder = (-remainder) & 0xffffffff;
1721 else
1723 can_invert = 0;
1724 can_negate = 0;
 1727 /* Now try to find a way of doing the job in either two or three
1728 instructions.
1729 We start by looking for the largest block of zeros that are aligned on
 1730 a 2-bit boundary; we then fill up the temps, wrapping around to the
1731 top of the word when we drop off the bottom.
1732 In the worst case this code should produce no more than four insns. */
1734 int best_start = 0;
1735 int best_consecutive_zeros = 0;
1737 for (i = 0; i < 32; i += 2)
1739 int consecutive_zeros = 0;
1741 if (!(remainder & (3 << i)))
1743 while ((i < 32) && !(remainder & (3 << i)))
1745 consecutive_zeros += 2;
1746 i += 2;
1748 if (consecutive_zeros > best_consecutive_zeros)
1750 best_consecutive_zeros = consecutive_zeros;
1751 best_start = i - consecutive_zeros;
1753 i -= 2;
1757 /* So long as it won't require any more insns to do so, it's
1758 desirable to emit a small constant (in bits 0...9) in the last
1759 insn. This way there is more chance that it can be combined with
1760 a later addressing insn to form a pre-indexed load or store
1761 operation. Consider:
1763 *((volatile int *)0xe0000100) = 1;
1764 *((volatile int *)0xe0000110) = 2;
1766 We want this to wind up as:
1768 mov rA, #0xe0000000
1769 mov rB, #1
1770 str rB, [rA, #0x100]
1771 mov rB, #2
1772 str rB, [rA, #0x110]
1774 rather than having to synthesize both large constants from scratch.
1776 Therefore, we calculate how many insns would be required to emit
1777 the constant starting from `best_start', and also starting from
 1778 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
1779 yield a shorter sequence, we may as well use zero. */
1780 if (best_start != 0
1781 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1782 && (count_insns_for_constant (remainder, 0) <=
1783 count_insns_for_constant (remainder, best_start)))
1784 best_start = 0;
1786 /* Now start emitting the insns. */
 1787 i = best_start;
 1788 do
1790 int end;
1792 if (i <= 0)
1793 i += 32;
1794 if (remainder & (3 << (i - 2)))
1796 end = i - 8;
1797 if (end < 0)
1798 end += 32;
1799 temp1 = remainder & ((0x0ff << end)
1800 | ((i < end) ? (0xff >> (32 - end)) : 0));
1801 remainder &= ~temp1;
1803 if (generate)
1805 rtx new_src, temp1_rtx;
1807 if (code == SET || code == MINUS)
1809 new_src = (subtargets ? gen_reg_rtx (mode) : target);
1810 if (can_invert && code != MINUS)
1811 temp1 = ~temp1;
1813 else
1815 if (remainder && subtargets)
1816 new_src = gen_reg_rtx (mode);
1817 else
1818 new_src = target;
1819 if (can_invert)
1820 temp1 = ~temp1;
1821 else if (can_negate)
1822 temp1 = -temp1;
1825 temp1 = trunc_int_for_mode (temp1, mode);
1826 temp1_rtx = GEN_INT (temp1);
1828 if (code == SET)
1830 else if (code == MINUS)
1831 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
1832 else
1833 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
1835 emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
1836 source = new_src;
1839 if (code == SET)
1841 can_invert = 0;
1842 code = PLUS;
1844 else if (code == MINUS)
1845 code = PLUS;
1847 insns++;
1848 i -= 6;
1850 i -= 2;
1852 while (remainder);
1855 return insns;
1858 /* Canonicalize a comparison so that we are more likely to recognize it.
1859 This can be done for a few constant compares, where we can make the
1860 immediate value easier to load. */
1862 enum rtx_code
1863 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1865 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1867 switch (code)
1869 case EQ:
1870 case NE:
1871 return code;
1873 case GT:
1874 case LE:
1875 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1876 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1878 *op1 = GEN_INT (i + 1);
1879 return code == GT ? GE : LT;
1881 break;
1883 case GE:
1884 case LT:
1885 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1886 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1888 *op1 = GEN_INT (i - 1);
1889 return code == GE ? GT : LE;
1891 break;
1893 case GTU:
1894 case LEU:
1895 if (i != ~((unsigned HOST_WIDE_INT) 0)
1896 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1898 *op1 = GEN_INT (i + 1);
1899 return code == GTU ? GEU : LTU;
1901 break;
1903 case GEU:
1904 case LTU:
1905 if (i != 0
1906 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1908 *op1 = GEN_INT (i - 1);
1909 return code == GEU ? GTU : LEU;
1911 break;
1913 default:
1914 abort ();
1917 return code;
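/* For instance (an illustrative case): 0x3ff is not a valid ARM
   immediate but 0x400 is, so a comparison such as (x <= 0x3ff) is
   rewritten by the function above as the equivalent (x < 0x400);
   i.e. LE becomes LT with *op1 incremented.  */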
1920 /* Decide whether a type should be returned in memory (true)
1921 or in a register (false). This is called by the macro
1922 RETURN_IN_MEMORY. */
1924 arm_return_in_memory (tree type)
1926 HOST_WIDE_INT size;
1928 if (!AGGREGATE_TYPE_P (type))
1929 /* All simple types are returned in registers. */
1930 return 0;
1932 size = int_size_in_bytes (type);
1934 if (TARGET_ATPCS)
1936 /* ATPCS returns aggregate types in memory only if they are
1937 larger than a word (or are variable size). */
1938 return (size < 0 || size > UNITS_PER_WORD);
1941 /* For the arm-wince targets we choose to be compatible with Microsoft's
1942 ARM and Thumb compilers, which always return aggregates in memory. */
1943 #ifndef ARM_WINCE
1944 /* All structures/unions bigger than one word are returned in memory.
1945 Also catch the case where int_size_in_bytes returns -1. In this case
1946 the aggregate is either huge or of variable size, and in either case
1947 we will want to return it via memory and not in a register. */
1948 if (size < 0 || size > UNITS_PER_WORD)
1949 return 1;
1951 if (TREE_CODE (type) == RECORD_TYPE)
1953 tree field;
1955 /* For a struct the APCS says that we only return in a register
1956 if the type is 'integer like' and every addressable element
1957 has an offset of zero. For practical purposes this means
1958 that the structure can have at most one non-bit-field element
1959 and that this element must be the first one in the structure. */
1961 /* Find the first field, ignoring non-FIELD_DECL things, which will
1962 have been created by C++. */
1963 for (field = TYPE_FIELDS (type);
1964 field && TREE_CODE (field) != FIELD_DECL;
1965 field = TREE_CHAIN (field))
1966 continue;
1968 if (field == NULL)
1969 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1971 /* Check that the first field is valid for returning in a register. */
1973 /* ... Floats are not allowed */
1974 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1975 return 1;
1977 /* ... Aggregates that are not themselves valid for returning in
1978 a register are not allowed. */
1979 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1980 return 1;
1982 /* Now check the remaining fields, if any. Only bitfields are allowed,
1983 since they are not addressable. */
1984 for (field = TREE_CHAIN (field);
1985 field;
1986 field = TREE_CHAIN (field))
1988 if (TREE_CODE (field) != FIELD_DECL)
1989 continue;
1991 if (!DECL_BIT_FIELD_TYPE (field))
1992 return 1;
1995 return 0;
1998 if (TREE_CODE (type) == UNION_TYPE)
2000 tree field;
2002 /* Unions can be returned in registers if every element is
2003 integral, or can be returned in an integer register. */
2004 for (field = TYPE_FIELDS (type);
2005 field;
2006 field = TREE_CHAIN (field))
2008 if (TREE_CODE (field) != FIELD_DECL)
2009 continue;
2011 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2012 return 1;
2014 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2015 return 1;
2018 return 0;
2020 #endif /* not ARM_WINCE */
2022 /* Return all other types in memory. */
2023 return 1;
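/* Illustrative consequences of the rules above for the default APCS
   (not ATPCS, not WinCE):

	struct s1 { int i; };		returned in r0 (one word)
	struct s2 { int i, j; };	returned in memory (two words)
	struct s3 { float f; };		returned in memory (float member)
	union  u1 { int i; char c; };	returned in r0 (all integral)
*/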
2026 /* Indicate whether or not words of a double are in big-endian order. */
2029 arm_float_words_big_endian (void)
2031 if (TARGET_CIRRUS)
2032 return 0;
2034 /* For FPA, float words are always big-endian. For VFP, floats words
2035 follow the memory system mode. */
2037 if (TARGET_HARD_FLOAT)
2039 /* FIXME: TARGET_HARD_FLOAT currently implies FPA. */
2040 return 1;
2043 if (TARGET_VFP)
2044 return (TARGET_BIG_END ? 1 : 0);
2046 return 1;
2049 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2050 for a call to a function whose data type is FNTYPE.
2051 For a library call, FNTYPE is NULL. */
2052 void
2053 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2054 rtx libname ATTRIBUTE_UNUSED,
2055 tree fndecl ATTRIBUTE_UNUSED)
2057 /* On the ARM, the offset starts at 0. */
2058 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2059 pcum->iwmmxt_nregs = 0;
2061 pcum->call_cookie = CALL_NORMAL;
2063 if (TARGET_LONG_CALLS)
2064 pcum->call_cookie = CALL_LONG;
2066 /* Check for long call/short call attributes. The attributes
2067 override any command line option. */
2068 if (fntype)
2070 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2071 pcum->call_cookie = CALL_SHORT;
2072 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2073 pcum->call_cookie = CALL_LONG;
2076 /* Varargs vectors are treated the same as long long.
2077 named_count avoids having to change the way arm handles 'named'
arguments. */
2078 pcum->named_count = 0;
2079 pcum->nargs = 0;
2081 if (TARGET_REALLY_IWMMXT && fntype)
2083 tree fn_arg;
2085 for (fn_arg = TYPE_ARG_TYPES (fntype);
2086 fn_arg;
2087 fn_arg = TREE_CHAIN (fn_arg))
2088 pcum->named_count += 1;
2090 if (! pcum->named_count)
2091 pcum->named_count = INT_MAX;
2095 /* Determine where to put an argument to a function.
2096 Value is zero to push the argument on the stack,
2097 or a hard register in which to store the argument.
2099 MODE is the argument's machine mode.
2100 TYPE is the data type of the argument (as a tree).
2101 This is null for libcalls where that information may
2102 not be available.
2103 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2104 the preceding args and about the function being called.
2105 NAMED is nonzero if this argument is a named parameter
2106 (otherwise it is an extra parameter matching an ellipsis). */
2109 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2110 tree type ATTRIBUTE_UNUSED, int named)
2112 if (TARGET_REALLY_IWMMXT)
2114 if (VECTOR_MODE_SUPPORTED_P (mode))
2116 /* Varargs vectors are treated the same as long long.
2117 named_count avoids having to change the way arm handles 'named'
arguments. */
2118 if (pcum->named_count <= pcum->nargs + 1)
2120 if (pcum->nregs == 1)
2121 pcum->nregs += 1;
2122 if (pcum->nregs <= 2)
2123 return gen_rtx_REG (mode, pcum->nregs);
2124 else
2125 return NULL_RTX;
2127 else if (pcum->iwmmxt_nregs <= 9)
2128 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2129 else
2130 return NULL_RTX;
2132 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2133 pcum->nregs += 1;
2136 if (mode == VOIDmode)
2137 /* Compute operand 2 of the call insn. */
2138 return GEN_INT (pcum->call_cookie);
2140 if (!named || pcum->nregs >= NUM_ARG_REGS)
2141 return NULL_RTX;
2143 return gen_rtx_REG (mode, pcum->nregs);
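/* An illustrative example of the register assignment above: for

	void f (int a, long long b, int c);

   a is passed in r0.  With TARGET_REALLY_IWMMXT the DImode argument
   b is bumped to the even pair r2/r3 (skipping r1), leaving c for
   the stack; otherwise b occupies r1/r2 and c is passed in r3.  */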
2146 /* Variable sized types are passed by reference. This is a GCC
2147 extension to the ARM ABI. */
2150 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2151 enum machine_mode mode ATTRIBUTE_UNUSED,
2152 tree type, int named ATTRIBUTE_UNUSED)
2154 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2157 /* Implement va_arg. */
2160 arm_va_arg (tree valist, tree type)
2162 /* Variable sized types are passed by reference. */
2163 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2165 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2166 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2169 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2171 tree minus_eight;
2172 tree t;
2174 /* Maintain 64-bit alignment of the valist pointer by
2175 constructing: valist = ((valist + (8 - 1)) & -8). */
2176 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2177 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2178 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2179 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2180 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2181 TREE_SIDE_EFFECTS (t) = 1;
2182 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2184 /* This is to stop the combine pass from optimizing
2185 away the alignment adjustment. */
2186 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2189 return std_expand_builtin_va_arg (valist, type);
2192 /* Encode the current state of the #pragma [no_]long_calls. */
2193 typedef enum
2195 OFF, /* No #pragma [no_]long_calls is in effect. */
2196 LONG, /* #pragma long_calls is in effect. */
2197 SHORT /* #pragma no_long_calls is in effect. */
2198 } arm_pragma_enum;
2200 static arm_pragma_enum arm_pragma_long_calls = OFF;
2202 void
2203 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2205 arm_pragma_long_calls = LONG;
2208 void
2209 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2211 arm_pragma_long_calls = SHORT;
2214 void
2215 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2217 arm_pragma_long_calls = OFF;
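/* Illustrative use in user source code:

	#pragma long_calls
	void far_away (void);	   -- receives the long_call attribute
	#pragma no_long_calls
	void near_by (void);	   -- receives the short_call attribute
	#pragma long_calls_off	   -- back to the command-line default

   (the attribute itself is attached in arm_set_default_type_attributes
   below).  */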
2220 /* Table of machine attributes. */
2221 const struct attribute_spec arm_attribute_table[] =
2223 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2224 /* Function calls made to this symbol must be done indirectly, because
2225 it may lie outside of the 26 bit addressing range of a normal function
2226 call. */
2227 { "long_call", 0, 0, false, true, true, NULL },
2228 /* Whereas these functions are always known to reside within the 26 bit
2229 addressing range. */
2230 { "short_call", 0, 0, false, true, true, NULL },
2231 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2232 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2233 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2234 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2235 #ifdef ARM_PE
2236 /* ARM/PE has three new attributes:
2237 interfacearm - ?
2238 dllexport - for exporting a function/variable that will live in a dll
2239 dllimport - for importing a function/variable from a dll
2241 Microsoft allows multiple declspecs in one __declspec, separating
2242 them with spaces. We do NOT support this. Instead, use __declspec
2243 multiple times.
2245 { "dllimport", 0, 0, true, false, false, NULL },
2246 { "dllexport", 0, 0, true, false, false, NULL },
2247 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2248 #endif
2249 { NULL, 0, 0, false, false, false, NULL }
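/* Illustrative declarations using the attributes above (hypothetical
   user code):

	void handler (void) __attribute__ ((interrupt ("IRQ")));
	void far_func (void) __attribute__ ((long_call));
	void stub (void) __attribute__ ((naked));
*/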
2252 /* Handle an attribute requiring a FUNCTION_DECL;
2253 arguments as in struct attribute_spec.handler. */
2254 static tree
2255 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2256 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2258 if (TREE_CODE (*node) != FUNCTION_DECL)
2260 warning ("`%s' attribute only applies to functions",
2261 IDENTIFIER_POINTER (name));
2262 *no_add_attrs = true;
2265 return NULL_TREE;
2268 /* Handle an "interrupt" or "isr" attribute;
2269 arguments as in struct attribute_spec.handler. */
2270 static tree
2271 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2272 bool *no_add_attrs)
2274 if (DECL_P (*node))
2276 if (TREE_CODE (*node) != FUNCTION_DECL)
2278 warning ("`%s' attribute only applies to functions",
2279 IDENTIFIER_POINTER (name));
2280 *no_add_attrs = true;
2282 /* FIXME: the argument if any is checked for type attributes;
2283 should it be checked for decl ones? */
2285 else
2287 if (TREE_CODE (*node) == FUNCTION_TYPE
2288 || TREE_CODE (*node) == METHOD_TYPE)
2290 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2292 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2293 *no_add_attrs = true;
2296 else if (TREE_CODE (*node) == POINTER_TYPE
2297 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2298 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2299 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2301 *node = build_type_copy (*node);
2302 TREE_TYPE (*node) = build_type_attribute_variant
2303 (TREE_TYPE (*node),
2304 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2305 *no_add_attrs = true;
2307 else
2309 /* Possibly pass this attribute on from the type to a decl. */
2310 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2311 | (int) ATTR_FLAG_FUNCTION_NEXT
2312 | (int) ATTR_FLAG_ARRAY_NEXT))
2314 *no_add_attrs = true;
2315 return tree_cons (name, args, NULL_TREE);
2317 else
2319 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2324 return NULL_TREE;
2327 /* Return 0 if the attributes for two types are incompatible, 1 if they
2328 are compatible, and 2 if they are nearly compatible (which causes a
2329 warning to be generated). */
2330 static int
2331 arm_comp_type_attributes (tree type1, tree type2)
2333 int l1, l2, s1, s2;
2335 /* Check for mismatch of non-default calling convention. */
2336 if (TREE_CODE (type1) != FUNCTION_TYPE)
2337 return 1;
2339 /* Check for mismatched call attributes. */
2340 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2341 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2342 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2343 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2345 /* Only bother to check if an attribute is defined. */
2346 if (l1 | l2 | s1 | s2)
2348 /* If one type has an attribute, the other must have the same attribute. */
2349 if ((l1 != l2) || (s1 != s2))
2350 return 0;
2352 /* Disallow mixed attributes. */
2353 if ((l1 & s2) || (l2 & s1))
2354 return 0;
2357 /* Check for mismatched ISR attribute. */
2358 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2359 if (! l1)
2360 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2361 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2362 if (! l2)
2363 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2364 if (l1 != l2)
2365 return 0;
2367 return 1;
2370 /* Encode long_call or short_call attribute by prefixing
2371 symbol name in DECL with a special character FLAG. */
2372 void
2373 arm_encode_call_attribute (tree decl, int flag)
2375 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2376 int len = strlen (str);
2377 char * newstr;
2379 /* Do not allow weak functions to be treated as short call. */
2380 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2381 return;
2383 newstr = alloca (len + 2);
2384 newstr[0] = flag;
2385 strcpy (newstr + 1, str);
2387 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2388 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2391 /* Assigns default attributes to newly defined type. This is used to
2392 set short_call/long_call attributes for function types of
2393 functions defined inside corresponding #pragma scopes. */
2394 static void
2395 arm_set_default_type_attributes (tree type)
2397 /* Add __attribute__ ((long_call)) to all functions, when
2398 inside #pragma long_calls or __attribute__ ((short_call)),
2399 when inside #pragma no_long_calls. */
2400 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2402 tree type_attr_list, attr_name;
2403 type_attr_list = TYPE_ATTRIBUTES (type);
2405 if (arm_pragma_long_calls == LONG)
2406 attr_name = get_identifier ("long_call");
2407 else if (arm_pragma_long_calls == SHORT)
2408 attr_name = get_identifier ("short_call");
2409 else
2410 return;
2412 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2413 TYPE_ATTRIBUTES (type) = type_attr_list;
2417 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2418 defined within the current compilation unit. If this cannot be
2419 determined, then 0 is returned. */
2420 static int
2421 current_file_function_operand (rtx sym_ref)
2423 /* This is a bit of a fib. A function will have a short call flag
2424 applied to its name if it has the short call attribute, or if it has
2425 already been defined within the current compilation unit. */
2426 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2427 return 1;
2429 /* The current function is always defined within the current compilation
2430 unit. If it is a weak definition, however, then this may not be the
2431 real definition of the function, and so we have to say no. */
2432 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2433 && !DECL_WEAK (current_function_decl))
2434 return 1;
2436 /* We cannot make the determination - default to returning 0. */
2437 return 0;
2440 /* Return nonzero if a 32 bit "long_call" should be generated for
2441 this call. We generate a long_call if the function:
2443 a. has an __attribute__ ((long_call))
2444 or b. is within the scope of a #pragma long_calls
2445 or c. the -mlong-calls command line switch has been specified
2447 However we do not generate a long call if the function:
2449 d. has an __attribute__ ((short_call))
2450 or e. is inside the scope of a #pragma no_long_calls
2451 or f. has an __attribute__ ((section))
2452 or g. is defined within the current compilation unit.
2454 This function will be called by C fragments contained in the machine
2455 description file. CALL_REF and CALL_COOKIE correspond to the matched
2456 rtl operands. CALL_SYMBOL is used to distinguish between
2457 two different callers of the function. It is set to 1 in the
2458 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2459 and "call_value" patterns. This is because of the difference in the
2460 SYM_REFs passed by these patterns. */
2462 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2464 if (!call_symbol)
2466 if (GET_CODE (sym_ref) != MEM)
2467 return 0;
2469 sym_ref = XEXP (sym_ref, 0);
2472 if (GET_CODE (sym_ref) != SYMBOL_REF)
2473 return 0;
2475 if (call_cookie & CALL_SHORT)
2476 return 0;
2478 if (TARGET_LONG_CALLS && flag_function_sections)
2479 return 1;
2481 if (current_file_function_operand (sym_ref))
2482 return 0;
2484 return (call_cookie & CALL_LONG)
2485 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2486 || TARGET_LONG_CALLS;
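/* When this returns nonzero the call patterns emit an indirect
   sequence instead of a plain BL; schematically (an illustrative
   sketch only):

	ldr	ip, =far_func
	mov	lr, pc
	mov	pc, ip

   which can reach anywhere in the address space at the cost of an
   extra register and a constant-pool entry.  */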
2489 /* Return nonzero if it is ok to make a tail-call to DECL. */
2490 static bool
2491 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2493 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2495 if (cfun->machine->sibcall_blocked)
2496 return false;
2498 /* Never tailcall something for which we have no decl, or if we
2499 are in Thumb mode. */
2500 if (decl == NULL || TARGET_THUMB)
2501 return false;
2503 /* Get the calling method. */
2504 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2505 call_type = CALL_SHORT;
2506 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2507 call_type = CALL_LONG;
2509 /* Cannot tail-call to long calls, since these are out of range of
2510 a branch instruction. However, if not compiling PIC, we know
2511 we can reach the symbol if it is in this compilation unit. */
2512 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2513 return false;
2515 /* If we are interworking and the function is not declared static
2516 then we can't tail-call it unless we know that it exists in this
2517 compilation unit (since it might be a Thumb routine). */
2518 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2519 return false;
2521 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2522 if (IS_INTERRUPT (arm_current_func_type ()))
2523 return false;
2525 /* Everything else is ok. */
2526 return true;
2530 /* Addressing mode support functions. */
2532 /* Return nonzero if X is a legitimate immediate operand when compiling
2533 for PIC. */
2535 legitimate_pic_operand_p (rtx x)
2537 if (CONSTANT_P (x)
2538 && flag_pic
2539 && (GET_CODE (x) == SYMBOL_REF
2540 || (GET_CODE (x) == CONST
2541 && GET_CODE (XEXP (x, 0)) == PLUS
2542 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2543 return 0;
2545 return 1;
2549 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2551 if (GET_CODE (orig) == SYMBOL_REF
2552 || GET_CODE (orig) == LABEL_REF)
2554 #ifndef AOF_ASSEMBLER
2555 rtx pic_ref, address;
2556 #endif
2557 rtx insn;
2558 int subregs = 0;
2560 if (reg == 0)
2562 if (no_new_pseudos)
2563 abort ();
2564 else
2565 reg = gen_reg_rtx (Pmode);
2567 subregs = 1;
2570 #ifdef AOF_ASSEMBLER
2571 /* The AOF assembler can generate relocations for these directly, and
2572 understands that the PIC register has to be added into the offset. */
2573 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2574 #else
2575 if (subregs)
2576 address = gen_reg_rtx (Pmode);
2577 else
2578 address = reg;
2580 if (TARGET_ARM)
2581 emit_insn (gen_pic_load_addr_arm (address, orig));
2582 else
2583 emit_insn (gen_pic_load_addr_thumb (address, orig));
2585 if ((GET_CODE (orig) == LABEL_REF
2586 || (GET_CODE (orig) == SYMBOL_REF &&
2587 SYMBOL_REF_LOCAL_P (orig)))
2588 && NEED_GOT_RELOC)
2589 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2590 else
2592 pic_ref = gen_rtx_MEM (Pmode,
2593 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2594 address));
2595 RTX_UNCHANGING_P (pic_ref) = 1;
2598 insn = emit_move_insn (reg, pic_ref);
2599 #endif
2600 current_function_uses_pic_offset_table = 1;
2601 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2602 by loop. */
2603 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2604 REG_NOTES (insn));
2605 return reg;
2607 else if (GET_CODE (orig) == CONST)
2609 rtx base, offset;
2611 if (GET_CODE (XEXP (orig, 0)) == PLUS
2612 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2613 return orig;
2615 if (reg == 0)
2617 if (no_new_pseudos)
2618 abort ();
2619 else
2620 reg = gen_reg_rtx (Pmode);
2623 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2625 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2626 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2627 base == reg ? 0 : reg);
2629 else
2630 abort ();
2632 if (GET_CODE (offset) == CONST_INT)
2634 /* The base register doesn't really matter, we only want to
2635 test the index for the appropriate mode. */
2636 if (!arm_legitimate_index_p (mode, offset, 0))
2638 if (!no_new_pseudos)
2639 offset = force_reg (Pmode, offset);
2640 else
2641 abort ();
2644 if (GET_CODE (offset) == CONST_INT)
2645 return plus_constant (base, INTVAL (offset));
2648 if (GET_MODE_SIZE (mode) > 4
2649 && (GET_MODE_CLASS (mode) == MODE_INT
2650 || TARGET_SOFT_FLOAT))
2652 emit_insn (gen_addsi3 (reg, base, offset));
2653 return reg;
2656 return gen_rtx_PLUS (Pmode, base, offset);
2659 return orig;
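/* Schematically (an illustrative sketch, not the exact RTL): for a
   global symbol the sequence above amounts to

	ldr	rS, .LPIC	@ GOT-relative offset of the symbol
	ldr	rD, [sl, rS]	@ fetch the address from the GOT

   with sl as the PIC offset table register, whereas a local symbol
   or label (with NEED_GOT_RELOC) is simply added to the PIC register
   without the GOT load.  */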
2662 /* Generate code to load the PIC register. PROLOGUE is true if
2663 called from arm_expand_prologue (in which case we want the
2664 generated insns at the start of the function); false if called
2665 by an exception receiver that needs the PIC register reloaded
2666 (in which case the insns are just dumped at the current location). */
2667 void
2668 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2670 #ifndef AOF_ASSEMBLER
2671 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2672 rtx global_offset_table;
2674 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2675 return;
2677 if (!flag_pic)
2678 abort ();
2680 start_sequence ();
2681 l1 = gen_label_rtx ();
2683 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2684 /* On the ARM the PC register contains 'dot + 8' at the time of the
2685 addition; on the Thumb it is 'dot + 4'. */
2686 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2687 if (GOT_PCREL)
2688 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2689 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2690 else
2691 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2693 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2695 if (TARGET_ARM)
2697 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2698 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2700 else
2702 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2703 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2706 seq = get_insns ();
2707 end_sequence ();
2708 if (prologue)
2709 emit_insn_after (seq, get_insns ());
2710 else
2711 emit_insn (seq);
2713 /* Need to emit this whether or not we obey regdecls,
2714 since setjmp/longjmp can cause life info to screw up. */
2715 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2716 #endif /* AOF_ASSEMBLER */
2719 /* Return nonzero if X is valid as an ARM state addressing register. */
2720 static int
2721 arm_address_register_rtx_p (rtx x, int strict_p)
2723 int regno;
2725 if (GET_CODE (x) != REG)
2726 return 0;
2728 regno = REGNO (x);
2730 if (strict_p)
2731 return ARM_REGNO_OK_FOR_BASE_P (regno);
2733 return (regno <= LAST_ARM_REGNUM
2734 || regno >= FIRST_PSEUDO_REGISTER
2735 || regno == FRAME_POINTER_REGNUM
2736 || regno == ARG_POINTER_REGNUM);
2739 /* Return nonzero if X is a valid ARM state address operand. */
2741 arm_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2743 if (arm_address_register_rtx_p (x, strict_p))
2744 return 1;
2746 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2747 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2749 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2750 && GET_MODE_SIZE (mode) <= 4
2751 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2752 && GET_CODE (XEXP (x, 1)) == PLUS
2753 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2754 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), strict_p);
2756 /* After reload constants split into minipools will have addresses
2757 from a LABEL_REF. */
2758 else if (reload_completed
2759 && (GET_CODE (x) == LABEL_REF
2760 || (GET_CODE (x) == CONST
2761 && GET_CODE (XEXP (x, 0)) == PLUS
2762 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2763 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2764 return 1;
2766 else if (mode == TImode)
2767 return 0;
2769 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2771 if (GET_CODE (x) == PLUS
2772 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2773 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2775 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2777 if (val == 4 || val == -4 || val == -8)
2778 return 1;
2782 else if (GET_CODE (x) == PLUS)
2784 rtx xop0 = XEXP (x, 0);
2785 rtx xop1 = XEXP (x, 1);
2787 return ((arm_address_register_rtx_p (xop0, strict_p)
2788 && arm_legitimate_index_p (mode, xop1, strict_p))
2789 || (arm_address_register_rtx_p (xop1, strict_p)
2790 && arm_legitimate_index_p (mode, xop0, strict_p)));
2793 #if 0
2794 /* Reload currently can't handle MINUS, so disable this for now */
2795 else if (GET_CODE (x) == MINUS)
2797 rtx xop0 = XEXP (x, 0);
2798 rtx xop1 = XEXP (x, 1);
2800 return (arm_address_register_rtx_p (xop0, strict_p)
2801 && arm_legitimate_index_p (mode, xop1, strict_p));
2803 #endif
2805 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2806 && GET_CODE (x) == SYMBOL_REF
2807 && CONSTANT_POOL_ADDRESS_P (x)
2808 && ! (flag_pic
2809 && symbol_mentioned_p (get_pool_constant (x))))
2810 return 1;
2812 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2813 && (GET_MODE_SIZE (mode) <= 4)
2814 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2815 return 1;
2817 return 0;
2820 /* Return nonzero if INDEX is valid for an address index operand in
2821 ARM state. */
2822 static int
2823 arm_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
2825 HOST_WIDE_INT range;
2826 enum rtx_code code = GET_CODE (index);
2828 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2829 return (code == CONST_INT && INTVAL (index) < 1024
2830 && INTVAL (index) > -1024
2831 && (INTVAL (index) & 3) == 0);
2833 if (TARGET_CIRRUS
2834 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2835 return (code == CONST_INT
2836 && INTVAL (index) < 255
2837 && INTVAL (index) > -255);
2839 if (arm_address_register_rtx_p (index, strict_p)
2840 && GET_MODE_SIZE (mode) <= 4)
2841 return 1;
2843 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2844 return (code == CONST_INT
2845 && INTVAL (index) < 256
2846 && INTVAL (index) > -256);
2848 /* XXX What about ldrsb? */
2849 if (GET_MODE_SIZE (mode) <= 4 && code == MULT
2850 && (!arm_arch4 || (mode) != HImode))
2852 rtx xiop0 = XEXP (index, 0);
2853 rtx xiop1 = XEXP (index, 1);
2855 return ((arm_address_register_rtx_p (xiop0, strict_p)
2856 && power_of_two_operand (xiop1, SImode))
2857 || (arm_address_register_rtx_p (xiop1, strict_p)
2858 && power_of_two_operand (xiop0, SImode)));
2861 if (GET_MODE_SIZE (mode) <= 4
2862 && (code == LSHIFTRT || code == ASHIFTRT
2863 || code == ASHIFT || code == ROTATERT)
2864 && (!arm_arch4 || (mode) != HImode))
2866 rtx op = XEXP (index, 1);
2868 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2869 && GET_CODE (op) == CONST_INT
2870 && INTVAL (op) > 0
2871 && INTVAL (op) <= 31);
2874 /* XXX For ARM v4 we may be doing a sign-extend operation during the
2875 load, but that has a restricted addressing range and we are unable
2876 to tell here whether that is the case. To be safe we restrict all
2877 loads to that range. */
2878 range = ((mode) == HImode || (mode) == QImode)
2879 ? (arm_arch4 ? 256 : 4095) : 4096;
2881 return (code == CONST_INT
2882 && INTVAL (index) < range
2883 && INTVAL (index) > -range);
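/* Illustrative SImode addresses on ARMv4 judged by the tests above
   (assuming valid base/index registers):

	[r0, r1]		accepted (register index)
	[r0, r1, lsl #2]	accepted (power-of-two MULT form)
	[r0, #4095]		accepted (within the 4K range)
	[r0, #4096]		rejected (out of range)

   and for HImode on ARMv4 the range shrinks to +/-255.  */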
2886 /* Return nonzero if X is valid as a Thumb state base register. */
2887 static int
2888 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
2890 int regno;
2892 if (GET_CODE (x) != REG)
2893 return 0;
2895 regno = REGNO (x);
2897 if (strict_p)
2898 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
2900 return (regno <= LAST_LO_REGNUM
2901 || regno > LAST_VIRTUAL_REGISTER
2902 || regno == FRAME_POINTER_REGNUM
2903 || (GET_MODE_SIZE (mode) >= 4
2904 && (regno == STACK_POINTER_REGNUM
2905 || regno >= FIRST_PSEUDO_REGISTER
2906 || x == hard_frame_pointer_rtx
2907 || x == arg_pointer_rtx)));
2910 /* Return nonzero if x is a legitimate index register. This is the case
2911 for any base register that can access a QImode object. */
2912 inline static int
2913 thumb_index_register_rtx_p (rtx x, int strict_p)
2915 return thumb_base_register_rtx_p (x, QImode, strict_p);
2918 /* Return nonzero if x is a legitimate Thumb-state address.
2920 The AP may be eliminated to either the SP or the FP, so we use the
2921 least common denominator, e.g. SImode, and offsets from 0 to 64.
2923 ??? Verify whether the above is the right approach.
2925 ??? Also, the FP may be eliminated to the SP, so perhaps that
2926 needs special handling also.
2928 ??? Look at how the mips16 port solves this problem. It probably uses
2929 better ways to solve some of these problems.
2931 Although it is not incorrect, we don't accept QImode and HImode
2932 addresses based on the frame pointer or arg pointer until the
2933 reload pass starts. This is so that eliminating such addresses
2934 into stack based ones won't produce impossible code. */
2936 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2938 /* ??? Not clear if this is right. Experiment. */
2939 if (GET_MODE_SIZE (mode) < 4
2940 && !(reload_in_progress || reload_completed)
2941 && (reg_mentioned_p (frame_pointer_rtx, x)
2942 || reg_mentioned_p (arg_pointer_rtx, x)
2943 || reg_mentioned_p (virtual_incoming_args_rtx, x)
2944 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
2945 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
2946 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
2947 return 0;
2949 /* Accept any base register. SP only in SImode or larger. */
2950 else if (thumb_base_register_rtx_p (x, mode, strict_p))
2951 return 1;
2953 /* This is PC relative data before arm_reorg runs. */
2954 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
2955 && GET_CODE (x) == SYMBOL_REF
2956 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
2957 return 1;
2959 /* This is PC relative data after arm_reorg runs. */
2960 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
2961 && (GET_CODE (x) == LABEL_REF
2962 || (GET_CODE (x) == CONST
2963 && GET_CODE (XEXP (x, 0)) == PLUS
2964 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2965 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2966 return 1;
2968 /* Post-inc indexing only supported for SImode and larger. */
2969 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
2970 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
2971 return 1;
2973 else if (GET_CODE (x) == PLUS)
2975 /* REG+REG address can be any two index registers. */
2976 /* We disallow FRAME+REG addressing since we know that FRAME
2977 will be replaced with STACK, and SP relative addressing only
2978 permits SP+OFFSET. */
2979 if (GET_MODE_SIZE (mode) <= 4
2980 && XEXP (x, 0) != frame_pointer_rtx
2981 && XEXP (x, 1) != frame_pointer_rtx
2982 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2983 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
2984 return 1;
2986 /* REG+const has 5-7 bit offset for non-SP registers. */
2987 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2988 || XEXP (x, 0) == arg_pointer_rtx)
2989 && GET_CODE (XEXP (x, 1)) == CONST_INT
2990 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
2991 return 1;
2993 /* REG+const has 10 bit offset for SP, but only SImode and
2994 larger is supported. */
2995 /* ??? Should probably check for DI/DFmode overflow here
2996 just like GO_IF_LEGITIMATE_OFFSET does. */
2997 else if (GET_CODE (XEXP (x, 0)) == REG
2998 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
2999 && GET_MODE_SIZE (mode) >= 4
3000 && GET_CODE (XEXP (x, 1)) == CONST_INT
3001 && INTVAL (XEXP (x, 1)) >= 0
3002 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3003 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3004 return 1;
3006 else if (GET_CODE (XEXP (x, 0)) == REG
3007 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3008 && GET_MODE_SIZE (mode) >= 4
3009 && GET_CODE (XEXP (x, 1)) == CONST_INT
3010 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3011 return 1;
3014 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3015 && GET_MODE_SIZE (mode) == 4
3016 && GET_CODE (x) == SYMBOL_REF
3017 && CONSTANT_POOL_ADDRESS_P (x)
3018 && !(flag_pic
3019 && symbol_mentioned_p (get_pool_constant (x))))
3020 return 1;
3022 return 0;
3025 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3026 instruction of mode MODE. */
3028 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3030 switch (GET_MODE_SIZE (mode))
3032 case 1:
3033 return val >= 0 && val < 32;
3035 case 2:
3036 return val >= 0 && val < 64 && (val & 1) == 0;
3038 default:
3039 return (val >= 0
3040 && (val + GET_MODE_SIZE (mode)) <= 128
3041 && (val & 3) == 0);
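/* In other words (illustrative): byte accesses allow offsets 0..31,
   halfword accesses 0..62 in steps of 2, and word accesses 0..124 in
   steps of 4 (correspondingly less for larger modes), since
   val + size must not exceed 128.  */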
3045 /* Try machine-dependent ways of modifying an illegitimate address
3046 to be legitimate. If we find one, return the new, valid address. */
3048 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3050 if (GET_CODE (x) == PLUS)
3052 rtx xop0 = XEXP (x, 0);
3053 rtx xop1 = XEXP (x, 1);
3055 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3056 xop0 = force_reg (SImode, xop0);
3058 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3059 xop1 = force_reg (SImode, xop1);
3061 if (ARM_BASE_REGISTER_RTX_P (xop0)
3062 && GET_CODE (xop1) == CONST_INT)
3064 HOST_WIDE_INT n, low_n;
3065 rtx base_reg, val;
3066 n = INTVAL (xop1);
3068 if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
3070 low_n = n & 0x0f;
3071 n &= ~0x0f;
3072 if (low_n > 4)
3074 n += 16;
3075 low_n -= 16;
3078 else
3080 low_n = ((mode) == TImode ? 0
3081 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3082 n -= low_n;
3085 base_reg = gen_reg_rtx (SImode);
3086 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3087 GEN_INT (n)), NULL_RTX);
3088 emit_move_insn (base_reg, val);
3089 x = (low_n == 0 ? base_reg
3090 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3092 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3093 x = gen_rtx_PLUS (SImode, xop0, xop1);
3096 /* XXX We don't allow MINUS any more -- see comment in
3097 arm_legitimate_address_p (). */
3098 else if (GET_CODE (x) == MINUS)
3100 rtx xop0 = XEXP (x, 0);
3101 rtx xop1 = XEXP (x, 1);
3103 if (CONSTANT_P (xop0))
3104 xop0 = force_reg (SImode, xop0);
3106 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3107 xop1 = force_reg (SImode, xop1);
3109 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3110 x = gen_rtx_MINUS (SImode, xop0, xop1);
3113 if (flag_pic)
3115 /* We need to find and carefully transform any SYMBOL and LABEL
3116 references; so go back to the original address expression. */
3117 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3119 if (new_x != orig_x)
3120 x = new_x;
3123 return x;
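/* A worked example (illustrative): legitimizing the SImode address
   (plus (reg rB) (const_int 0x1234)) splits the offset into
   n = 0x1000 and low_n = 0x234; the code above emits

	add	rT, rB, #0x1000

   and returns (plus (reg rT) (const_int 0x234)), a valid offset for
   the memory access itself.  */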
3128 #define REG_OR_SUBREG_REG(X) \
3129 (GET_CODE (X) == REG \
3130 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3132 #define REG_OR_SUBREG_RTX(X) \
3133 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3135 #ifndef COSTS_N_INSNS
3136 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3137 #endif
3138 /* Worker routine for arm_rtx_costs. */
3139 static inline int
3140 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3142 enum machine_mode mode = GET_MODE (x);
3143 enum rtx_code subcode;
3144 int extra_cost;
3146 if (TARGET_THUMB)
3148 switch (code)
3150 case ASHIFT:
3151 case ASHIFTRT:
3152 case LSHIFTRT:
3153 case ROTATERT:
3154 case PLUS:
3155 case MINUS:
3156 case COMPARE:
3157 case NEG:
3158 case NOT:
3159 return COSTS_N_INSNS (1);
3161 case MULT:
3162 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3164 int cycles = 0;
3165 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3167 while (i)
3169 i >>= 2;
3170 cycles++;
3172 return COSTS_N_INSNS (2) + cycles;
3174 return COSTS_N_INSNS (1) + 16;
3176 case SET:
3177 return (COSTS_N_INSNS (1)
3178 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3179 + (GET_CODE (SET_DEST (x)) == MEM)));
3181 case CONST_INT:
3182 if (outer == SET)
3184 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3185 return 0;
3186 if (thumb_shiftable_const (INTVAL (x)))
3187 return COSTS_N_INSNS (2);
3188 return COSTS_N_INSNS (3);
3190 else if ((outer == PLUS || outer == COMPARE)
3191 && INTVAL (x) < 256 && INTVAL (x) > -256)
3192 return 0;
3193 else if (outer == AND
3194 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3195 return COSTS_N_INSNS (1);
3196 else if (outer == ASHIFT || outer == ASHIFTRT
3197 || outer == LSHIFTRT)
3198 return 0;
3199 return COSTS_N_INSNS (2);
3201 case CONST:
3202 case CONST_DOUBLE:
3203 case LABEL_REF:
3204 case SYMBOL_REF:
3205 return COSTS_N_INSNS (3);
3207 case UDIV:
3208 case UMOD:
3209 case DIV:
3210 case MOD:
3211 return 100;
3213 case TRUNCATE:
3214 return 99;
3216 case AND:
3217 case XOR:
3218 case IOR:
3219 /* XXX guess. */
3220 return 8;
3222 case ADDRESSOF:
3223 case MEM:
3224 /* XXX another guess. */
3225 /* Memory costs quite a lot for the first word, but subsequent words
3226 load at the equivalent of a single insn each. */
3227 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3228 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3229 ? 4 : 0));
3231 case IF_THEN_ELSE:
3232 /* XXX a guess. */
3233 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3234 return 14;
3235 return 2;
3237 case ZERO_EXTEND:
3238 /* XXX still guessing. */
3239 switch (GET_MODE (XEXP (x, 0)))
3241 case QImode:
3242 return (1 + (mode == DImode ? 4 : 0)
3243 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3245 case HImode:
3246 return (4 + (mode == DImode ? 4 : 0)
3247 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3249 case SImode:
3250 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3252 default:
3253 return 99;
3256 default:
3257 return 99;
3261 switch (code)
3263 case MEM:
3264 /* Memory costs quite a lot for the first word, but subsequent words
3265 load at the equivalent of a single insn each. */
3266 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3267 + (GET_CODE (x) == SYMBOL_REF
3268 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3270 case DIV:
3271 case MOD:
3272 case UDIV:
3273 case UMOD:
3274 return optimize_size ? COSTS_N_INSNS (2) : 100;
3276 case ROTATE:
3277 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3278 return 4;
3279 /* Fall through */
3280 case ROTATERT:
3281 if (mode != SImode)
3282 return 8;
3283 /* Fall through */
3284 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3285 if (mode == DImode)
3286 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3287 + ((GET_CODE (XEXP (x, 0)) == REG
3288 || (GET_CODE (XEXP (x, 0)) == SUBREG
3289 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3290 ? 0 : 8));
3291 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3292 || (GET_CODE (XEXP (x, 0)) == SUBREG
3293 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3294 ? 0 : 4)
3295 + ((GET_CODE (XEXP (x, 1)) == REG
3296 || (GET_CODE (XEXP (x, 1)) == SUBREG
3297 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3298 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3299 ? 0 : 4));
3301 case MINUS:
3302 if (mode == DImode)
3303 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3304 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3305 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3306 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3307 ? 0 : 8));
3309 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3310 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3311 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3312 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3313 ? 0 : 8)
3314 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3315 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3316 && const_double_rtx_ok_for_fpa (XEXP (x, 0))))
3317 ? 0 : 8));
3319 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3320 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3321 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3322 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3323 || subcode == ASHIFTRT || subcode == LSHIFTRT
3324 || subcode == ROTATE || subcode == ROTATERT
3325 || (subcode == MULT
3326 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3327 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3328 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3329 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3330 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3331 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3332 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3333 return 1;
3334 /* Fall through */
3336 case PLUS:
3337 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3338 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3339 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3340 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3341 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3342 ? 0 : 8));
3344 /* Fall through */
3345 case AND: case XOR: case IOR:
3346 extra_cost = 0;
3348 /* Normally the frame registers will be split into reg+const during
3349 reload, so it is a bad idea to combine them with other instructions,
3350 since then they might not be moved outside of loops. As a compromise
3351 we allow integration with ops that have a constant as their second
3352 operand. */
3353 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3354 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3355 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3356 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3357 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3358 extra_cost = 4;
3360 if (mode == DImode)
3361 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3362 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3363 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3364 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3365 ? 0 : 8));
3367 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3368 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3369 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3370 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3371 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3372 ? 0 : 4));
3374 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3375 return (1 + extra_cost
3376 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3377 || subcode == LSHIFTRT || subcode == ASHIFTRT
3378 || subcode == ROTATE || subcode == ROTATERT
3379 || (subcode == MULT
3380 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3381 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3382 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3383 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3384 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3385 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3386 ? 0 : 4));
3388 return 8;
3390 case MULT:
3391 /* There is no point basing this on the tuning, since it is always the
3392 fast variant if it exists at all. */
3393 if (arm_fast_multiply && mode == DImode
3394 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3395 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3396 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3397 return 8;
3399 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3400 || mode == DImode)
3401 return 30;
3403 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3405 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3406 & (unsigned HOST_WIDE_INT) 0xffffffff);
3407 int cost, const_ok = const_ok_for_arm (i);
3408 int j, booth_unit_size;
3410 if (arm_tune_xscale)
3412 unsigned HOST_WIDE_INT masked_const;
3414 /* The cost will be related to two insns.
3415 First a load of the constant (MOV or LDR), then a multiply. */
3416 cost = 2;
3417 if (! const_ok)
3418 cost += 1; /* LDR is probably more expensive because
3419 of longer result latency. */
3420 masked_const = i & 0xffff8000;
3421 if (masked_const != 0 && masked_const != 0xffff8000)
3423 masked_const = i & 0xf8000000;
3424 if (masked_const == 0 || masked_const == 0xf8000000)
3425 cost += 1;
3426 else
3427 cost += 2;
3429 return cost;
3432 /* Tune as appropriate. */
3433 cost = const_ok ? 4 : 8;
3434 booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
3435 for (j = 0; i && j < 32; j += booth_unit_size)
3437 i >>= booth_unit_size;
3438 cost += 2;
3441 return cost;
3444 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
3445 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3446 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
3448 case TRUNCATE:
3449 if (arm_fast_multiply && mode == SImode
3450 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3451 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3452 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3453 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3454 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3455 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3456 return 8;
3457 return 99;
3459 case NEG:
3460 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3461 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3462 /* Fall through */
3463 case NOT:
3464 if (mode == DImode)
3465 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3467 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3469 case IF_THEN_ELSE:
3470 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3471 return 14;
3472 return 2;
3474 case COMPARE:
3475 return 1;
3477 case ABS:
3478 return 4 + (mode == DImode ? 4 : 0);
3480 case SIGN_EXTEND:
3481 if (GET_MODE (XEXP (x, 0)) == QImode)
3482 return (4 + (mode == DImode ? 4 : 0)
3483 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3484 /* Fall through */
3485 case ZERO_EXTEND:
3486 switch (GET_MODE (XEXP (x, 0)))
3488 case QImode:
3489 return (1 + (mode == DImode ? 4 : 0)
3490 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3492 case HImode:
3493 return (4 + (mode == DImode ? 4 : 0)
3494 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3496 case SImode:
3497 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3499 case V8QImode:
3500 case V4HImode:
3501 case V2SImode:
3502 case V4QImode:
3503 case V2HImode:
3504 return 1;
3506 default:
3507 break;
3509 abort ();
3511 case CONST_INT:
3512 if (const_ok_for_arm (INTVAL (x)))
3513 return outer == SET ? 2 : -1;
3514 else if (outer == AND
3515 && const_ok_for_arm (~INTVAL (x)))
3516 return -1;
3517 else if ((outer == COMPARE
3518 || outer == PLUS || outer == MINUS)
3519 && const_ok_for_arm (-INTVAL (x)))
3520 return -1;
3521 else
3522 return 5;
3524 case CONST:
3525 case LABEL_REF:
3526 case SYMBOL_REF:
3527 return 6;
3529 case CONST_DOUBLE:
3530 if (const_double_rtx_ok_for_fpa (x))
3531 return outer == SET ? 2 : -1;
3532 else if ((outer == COMPARE || outer == PLUS)
3533 && neg_const_double_rtx_ok_for_fpa (x))
3534 return -1;
3535 return 7;
3537 default:
3538 return 99;
3542 static bool
3543 arm_rtx_costs (rtx x, int code, int outer_code, int *total)
3545 *total = arm_rtx_costs_1 (x, code, outer_code);
3546 return true;
3549 /* All address computations that can be done are free, but rtx cost returns
3550 the same for practically all of them. So we weight the different types
3551 of address here in the order (most preferred first):
3552 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
3553 static inline int
3554 arm_arm_address_cost (rtx x)
3556 enum rtx_code c = GET_CODE (x);
3558 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3559 return 0;
3560 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3561 return 10;
3563 if (c == PLUS || c == MINUS)
3565 char cl0 = GET_RTX_CLASS (GET_CODE (XEXP (x, 0)));
3566 char cl1 = GET_RTX_CLASS (GET_CODE (XEXP (x, 1)));
3568 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3569 return 2;
3571 if (cl0 == '2' || cl0 == 'c' || cl1 == '2' || cl1 == 'c')
3572 return 3;
3574 return 4;
3577 return 6;
3580 static inline int
3581 arm_thumb_address_cost (rtx x)
3583 enum rtx_code c = GET_CODE (x);
3585 if (c == REG)
3586 return 1;
3587 if (c == PLUS
3588 && GET_CODE (XEXP (x, 0)) == REG
3589 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3590 return 1;
3592 return 2;
3595 static int
3596 arm_address_cost (rtx x)
3598 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
3601 static int
3602 arm_use_dfa_pipeline_interface (void)
3604 return true;
3607 static int
3608 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3610 rtx i_pat, d_pat;
3612 /* Some true dependencies can have a higher cost depending
3613 on precisely how certain input operands are used. */
3614 if (arm_tune_xscale
3615 && REG_NOTE_KIND (link) == 0
3616 && recog_memoized (insn) >= 0
3617 && recog_memoized (dep) >= 0)
3619 int shift_opnum = get_attr_shift (insn);
3620 enum attr_type attr_type = get_attr_type (dep);
3622 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
3623 operand for INSN. If we have a shifted input operand and the
3624 instruction we depend on is another ALU instruction, then we may
3625 have to account for an additional stall. */
3626 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
3628 rtx shifted_operand;
3629 int opno;
3631 /* Get the shifted operand. */
3632 extract_insn (insn);
3633 shifted_operand = recog_data.operand[shift_opnum];
3635 /* Iterate over all the operands in DEP. If we write an operand
3636 that overlaps with SHIFTED_OPERAND, then we have to increase the
3637 cost of this dependency. */
3638 extract_insn (dep);
3639 preprocess_constraints ();
3640 for (opno = 0; opno < recog_data.n_operands; opno++)
3642 /* We can ignore strict inputs. */
3643 if (recog_data.operand_type[opno] == OP_IN)
3644 continue;
3646 if (reg_overlap_mentioned_p (recog_data.operand[opno],
3647 shifted_operand))
3648 return 2;
3653 /* XXX This is not strictly true for the FPA. */
3654 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
3655 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
3656 return 0;
3658 /* Call insns don't incur a stall, even if they follow a load. */
3659 if (REG_NOTE_KIND (link) == 0
3660 && GET_CODE (insn) == CALL_INSN)
3661 return 1;
3663 if ((i_pat = single_set (insn)) != NULL
3664 && GET_CODE (SET_SRC (i_pat)) == MEM
3665 && (d_pat = single_set (dep)) != NULL
3666 && GET_CODE (SET_DEST (d_pat)) == MEM)
3668 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
3669 /* This is a load after a store; there is no conflict if the load reads
3670 from a cached area. Assume that loads from the stack, and from the
3671 constant pool are cached, and that others will miss. This is a
3672 hack. */
3674 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
3675 || reg_mentioned_p (stack_pointer_rtx, src_mem)
3676 || reg_mentioned_p (frame_pointer_rtx, src_mem)
3677 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
3678 return 1;
3681 return cost;
3684 static int fpa_consts_inited = 0;
3686 static const char * const strings_fpa[8] =
3688 "0", "1", "2", "3",
3689 "4", "5", "0.5", "10"
3692 static REAL_VALUE_TYPE values_fpa[8];
3694 static void
3695 init_fpa_table (void)
3697 int i;
3698 REAL_VALUE_TYPE r;
3700 for (i = 0; i < 8; i++)
3702 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
3703 values_fpa[i] = r;
3706 fpa_consts_inited = 1;
3709 /* Return TRUE if rtx X is a valid immediate FPA constant. */
3711 const_double_rtx_ok_for_fpa (rtx x)
3713 REAL_VALUE_TYPE r;
3714 int i;
3716 if (!fpa_consts_inited)
3717 init_fpa_table ();
3719 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3720 if (REAL_VALUE_MINUS_ZERO (r))
3721 return 0;
3723 for (i = 0; i < 8; i++)
3724 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3725 return 1;
3727 return 0;
3730 /* Return TRUE if rtx X is a valid immediate FPA constant when negated. */
3732 neg_const_double_rtx_ok_for_fpa (rtx x)
3734 REAL_VALUE_TYPE r;
3735 int i;
3737 if (!fpa_consts_inited)
3738 init_fpa_table ();
3740 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3741 r = REAL_VALUE_NEGATE (r);
3742 if (REAL_VALUE_MINUS_ZERO (r))
3743 return 0;
3745 for (i = 0; i < 8; i++)
3746 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3747 return 1;
3749 return 0;
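/* For example (illustrative): 2.0 and 0.5 are valid FPA immediates,
   -1.0 qualifies via the negated test above, and a value such as
   3.25 has to come from the constant pool.  */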
3752 /* Predicates for `match_operand' and `match_operator'. */
3754 /* s_register_operand is the same as register_operand, but it doesn't accept
3755 (SUBREG (MEM)...).
3757 This function exists because at the time it was put in it led to better
3758 code. SUBREG(MEM) always needs a reload in the places where
3759 s_register_operand is used, and this seemed to lead to excessive
3760 reloading. */
3762 s_register_operand (rtx op, enum machine_mode mode)
3764 if (GET_MODE (op) != mode && mode != VOIDmode)
3765 return 0;
3767 if (GET_CODE (op) == SUBREG)
3768 op = SUBREG_REG (op);
3770 /* We don't consider registers whose class is NO_REGS
3771 to be a register operand. */
3772 /* XXX might have to check for lo regs only for thumb ??? */
3773 return (GET_CODE (op) == REG
3774 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3775 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3778 /* A hard register operand (even before reload). */
3780 arm_hard_register_operand (rtx op, enum machine_mode mode)
3782 if (GET_MODE (op) != mode && mode != VOIDmode)
3783 return 0;
3785 return (GET_CODE (op) == REG
3786 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3789 /* Only accept reg, subreg(reg), const_int. */
3791 reg_or_int_operand (rtx op, enum machine_mode mode)
3793 if (GET_CODE (op) == CONST_INT)
3794 return 1;
3796 if (GET_MODE (op) != mode && mode != VOIDmode)
3797 return 0;
3799 if (GET_CODE (op) == SUBREG)
3800 op = SUBREG_REG (op);
3802 /* We don't consider registers whose class is NO_REGS
3803 to be a register operand. */
3804 return (GET_CODE (op) == REG
3805 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3806 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3809 /* Return 1 if OP is an item in memory, given that we are in reload. */
3811 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3813 int regno = true_regnum (op);
3815 return (!CONSTANT_P (op)
3816 && (regno == -1
3817 || (GET_CODE (op) == REG
3818 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3821 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3822 memory access (architecture V4).
3823 MODE is QImode if called when computing constraints, or VOIDmode when
3824 emitting patterns. In this latter case we cannot use memory_operand()
3825 because it will fail on badly formed MEMs, which is precisely what we are
3826 trying to catch. */
3828 bad_signed_byte_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3830 if (GET_CODE (op) != MEM)
3831 return 0;
3833 op = XEXP (op, 0);
3835 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3836 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3837 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3838 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3839 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3840 return 1;
3842 /* Big constants are also bad. */
3843 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3844 && (INTVAL (XEXP (op, 1)) > 0xff
3845 || -INTVAL (XEXP (op, 1)) > 0xff))
3846 return 1;
3848 /* Everything else is good, or will automatically be made so. */
3849 return 0;
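/* For example, (mem:QI (plus:SI (reg:SI 0) (const_int 256))) is "bad"
   here: the v4 signed-byte load only accepts immediate offsets in the
   range -255..255, so larger offsets must be legitimized first.  */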
3852 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3854 arm_rhs_operand (rtx op, enum machine_mode mode)
3856 return (s_register_operand (op, mode)
3857 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3860 /* Return TRUE for valid operands for the
3861 rhs of an ARM instruction, or a load. */
3863 arm_rhsm_operand (rtx op, enum machine_mode mode)
3865 return (s_register_operand (op, mode)
3866 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3867 || memory_operand (op, mode));
3870 /* Return TRUE for valid operands for the rhs of an ARM instruction, or if a
3871 constant that is valid when negated. */
3873 arm_add_operand (rtx op, enum machine_mode mode)
3875 if (TARGET_THUMB)
3876 return thumb_cmp_operand (op, mode);
3878 return (s_register_operand (op, mode)
3879 || (GET_CODE (op) == CONST_INT
3880 && (const_ok_for_arm (INTVAL (op))
3881 || const_ok_for_arm (-INTVAL (op)))));
3884 /* Return TRUE for valid ARM constants (or when valid if negated). */
3886 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3888 return (GET_CODE (op) == CONST_INT
3889 && (const_ok_for_arm (INTVAL (op))
3890 || const_ok_for_arm (-INTVAL (op))));
3894 arm_not_operand (rtx op, enum machine_mode mode)
3896 return (s_register_operand (op, mode)
3897 || (GET_CODE (op) == CONST_INT
3898 && (const_ok_for_arm (INTVAL (op))
3899 || const_ok_for_arm (~INTVAL (op)))));
3902 /* Return TRUE if the operand is a memory reference which contains an
3903 offsettable address. */
3905 offsettable_memory_operand (rtx op, enum machine_mode mode)
3907 if (mode == VOIDmode)
3908 mode = GET_MODE (op);
3910 return (mode == GET_MODE (op)
3911 && GET_CODE (op) == MEM
3912 && offsettable_address_p (reload_completed | reload_in_progress,
3913 mode, XEXP (op, 0)));
3916 /* Return TRUE if the operand is a memory reference which is, or can be
3917 made word aligned by adjusting the offset. */
3919 alignable_memory_operand (rtx op, enum machine_mode mode)
3921 rtx reg;
3923 if (mode == VOIDmode)
3924 mode = GET_MODE (op);
3926 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3927 return 0;
3929 op = XEXP (op, 0);
3931 return ((GET_CODE (reg = op) == REG
3932 || (GET_CODE (op) == SUBREG
3933 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3934 || (GET_CODE (op) == PLUS
3935 && GET_CODE (XEXP (op, 1)) == CONST_INT
3936 && (GET_CODE (reg = XEXP (op, 0)) == REG
3937 || (GET_CODE (XEXP (op, 0)) == SUBREG
3938 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3939 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3942 /* Similar to s_register_operand, but does not allow hard integer
3943 registers. */
3945 f_register_operand (rtx op, enum machine_mode mode)
3947 if (GET_MODE (op) != mode && mode != VOIDmode)
3948 return 0;
3950 if (GET_CODE (op) == SUBREG)
3951 op = SUBREG_REG (op);
3953 /* We don't consider registers whose class is NO_REGS
3954 to be a register operand. */
3955 return (GET_CODE (op) == REG
3956 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3957 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
3960 /* Return TRUE for valid operands for the rhs of an FPA instruction. */
3962 fpa_rhs_operand (rtx op, enum machine_mode mode)
3964 if (s_register_operand (op, mode))
3965 return TRUE;
3967 if (GET_MODE (op) != mode && mode != VOIDmode)
3968 return FALSE;
3970 if (GET_CODE (op) == CONST_DOUBLE)
3971 return const_double_rtx_ok_for_fpa (op);
3973 return FALSE;
3977 fpa_add_operand (rtx op, enum machine_mode mode)
3979 if (s_register_operand (op, mode))
3980 return TRUE;
3982 if (GET_MODE (op) != mode && mode != VOIDmode)
3983 return FALSE;
3985 if (GET_CODE (op) == CONST_DOUBLE)
3986 return (const_double_rtx_ok_for_fpa (op)
3987 || neg_const_double_rtx_ok_for_fpa (op));
3989 return FALSE;
3992 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
3994 cirrus_memory_offset (rtx op)
3996 /* Reject eliminable registers. */
3997 if (! (reload_in_progress || reload_completed)
3998 && ( reg_mentioned_p (frame_pointer_rtx, op)
3999 || reg_mentioned_p (arg_pointer_rtx, op)
4000 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4001 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4002 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4003 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4004 return 0;
4006 if (GET_CODE (op) == MEM)
4008 rtx ind;
4010 ind = XEXP (op, 0);
4012 /* Match: (mem (reg)). */
4013 if (GET_CODE (ind) == REG)
4014 return 1;
4016 /* Match:
4017 (mem (plus (reg)
4018 (const))). */
4019 if (GET_CODE (ind) == PLUS
4020 && GET_CODE (XEXP (ind, 0)) == REG
4021 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4022 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4023 return 1;
4026 return 0;
4029 /* Return nonzero if OP is a Cirrus or general register. */
4031 cirrus_register_operand (rtx op, enum machine_mode mode)
4033 if (GET_MODE (op) != mode && mode != VOIDmode)
4034 return FALSE;
4036 if (GET_CODE (op) == SUBREG)
4037 op = SUBREG_REG (op);
4039 return (GET_CODE (op) == REG
4040 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4041 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4044 /* Return nonzero if OP is a Cirrus FP register. */
4046 cirrus_fp_register (rtx op, enum machine_mode mode)
4048 if (GET_MODE (op) != mode && mode != VOIDmode)
4049 return FALSE;
4051 if (GET_CODE (op) == SUBREG)
4052 op = SUBREG_REG (op);
4054 return (GET_CODE (op) == REG
4055 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4056 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4059 /* Return nonzero if OP is a 6-bit constant (0..63). */
4061 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4063 return (GET_CODE (op) == CONST_INT
4064 && INTVAL (op) >= 0
4065 && INTVAL (op) < 64);
4068 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4069 Used by the Cirrus Maverick code, which has to work around
4070 a hardware bug triggered by such instructions. */
4071 static bool
4072 arm_memory_load_p (rtx insn)
4074 rtx body, lhs, rhs;
4076 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4077 return false;
4079 body = PATTERN (insn);
4081 if (GET_CODE (body) != SET)
4082 return false;
4084 lhs = XEXP (body, 0);
4085 rhs = XEXP (body, 1);
4087 lhs = REG_OR_SUBREG_RTX (lhs);
4089 /* If the destination is not a general purpose
4090 register we do not have to worry. */
4091 if (GET_CODE (lhs) != REG
4092 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4093 return false;
4095 /* As well as loads from memory we also have to react
4096 to loads of invalid constants which will be turned
4097 into loads from the minipool. */
4098 return (GET_CODE (rhs) == MEM
4099 || GET_CODE (rhs) == SYMBOL_REF
4100 || note_invalid_constants (insn, -1, false));
4103 /* Return TRUE if INSN is a Cirrus instruction. */
4104 static bool
4105 arm_cirrus_insn_p (rtx insn)
4107 enum attr_cirrus attr;
4109 /* get_attr aborts on USE and CLOBBER. */
4110 if (!insn
4111 || GET_CODE (insn) != INSN
4112 || GET_CODE (PATTERN (insn)) == USE
4113 || GET_CODE (PATTERN (insn)) == CLOBBER)
4114 return 0;
4116 attr = get_attr_cirrus (insn);
4118 return attr != CIRRUS_NOT;
4121 /* Cirrus reorg for invalid instruction combinations. */
4122 static void
4123 cirrus_reorg (rtx first)
4125 enum attr_cirrus attr;
4126 rtx body = PATTERN (first);
4127 rtx t;
4128 int nops;
4130 /* Any branch must be followed by two non-Cirrus instructions. */
4131 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4133 nops = 0;
4134 t = next_nonnote_insn (first);
4136 if (arm_cirrus_insn_p (t))
4137 ++ nops;
4139 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4140 ++ nops;
4142 while (nops --)
4143 emit_insn_after (gen_nop (), first);
4145 return;
4148 /* (float (blah)) is in parallel with a clobber. */
4149 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4150 body = XVECEXP (body, 0, 0);
4152 if (GET_CODE (body) == SET)
4154 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4156 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4157 be followed by a non-Cirrus insn. */
4158 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4160 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4161 emit_insn_after (gen_nop (), first);
4163 return;
4165 else if (arm_memory_load_p (first))
4167 unsigned int arm_regno;
4169 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4170 ldr/cfmv64hr combination where the Rd field is the same
4171 in both instructions must be split with a non-Cirrus
4172 insn. Example:
4174 ldr r0, blah
4176 cfmvsr mvf0, r0. */
4178 /* Get Arm register number for ldr insn. */
4179 if (GET_CODE (lhs) == REG)
4180 arm_regno = REGNO (lhs);
4181 else if (GET_CODE (rhs) == REG)
4182 arm_regno = REGNO (rhs);
4183 else
4184 abort ();
4186 /* Next insn. */
4187 first = next_nonnote_insn (first);
4189 if (! arm_cirrus_insn_p (first))
4190 return;
4192 body = PATTERN (first);
4194 /* (float (blah)) is in parallel with a clobber. */
4195 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4196 body = XVECEXP (body, 0, 0);
4198 if (GET_CODE (body) == FLOAT)
4199 body = XEXP (body, 0);
4201 if (get_attr_cirrus (first) == CIRRUS_MOVE
4202 && GET_CODE (XEXP (body, 1)) == REG
4203 && arm_regno == REGNO (XEXP (body, 1)))
4204 emit_insn_after (gen_nop (), first);
4206 return;
4210 /* get_attr aborts on USE and CLOBBER. */
4211 if (!first
4212 || GET_CODE (first) != INSN
4213 || GET_CODE (PATTERN (first)) == USE
4214 || GET_CODE (PATTERN (first)) == CLOBBER)
4215 return;
4217 attr = get_attr_cirrus (first);
4219 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4220 must be followed by a non-coprocessor instruction. */
4221 if (attr == CIRRUS_COMPARE)
4223 nops = 0;
4225 t = next_nonnote_insn (first);
4227 if (arm_cirrus_insn_p (t))
4228 ++ nops;
4230 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4231 ++ nops;
4233 while (nops --)
4234 emit_insn_after (gen_nop (), first);
4236 return;
4240 /* Return nonzero if OP is a constant power of two. */
4242 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4244 if (GET_CODE (op) == CONST_INT)
4246 HOST_WIDE_INT value = INTVAL (op);
4248 return value != 0 && (value & (value - 1)) == 0;
4251 return FALSE;
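/* The value & (value - 1) test clears the lowest set bit, so it is zero
   exactly for powers of two: 8 & 7 == 0, but 6 & 5 == 4.  */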
4254 /* Return TRUE for a valid operand of a DImode operation.
4255 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4256 Note that this disallows MEM(REG+REG), but allows
4257 MEM(PRE/POST_INC/DEC(REG)). */
4259 di_operand (rtx op, enum machine_mode mode)
4261 if (s_register_operand (op, mode))
4262 return TRUE;
4264 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4265 return FALSE;
4267 if (GET_CODE (op) == SUBREG)
4268 op = SUBREG_REG (op);
4270 switch (GET_CODE (op))
4272 case CONST_DOUBLE:
4273 case CONST_INT:
4274 return TRUE;
4276 case MEM:
4277 return memory_address_p (DImode, XEXP (op, 0));
4279 default:
4280 return FALSE;
4284 /* Like di_operand, but don't accept constants. */
4286 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4288 if (s_register_operand (op, mode))
4289 return TRUE;
4291 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4292 return FALSE;
4294 if (GET_CODE (op) == SUBREG)
4295 op = SUBREG_REG (op);
4297 if (GET_CODE (op) == MEM)
4298 return memory_address_p (DImode, XEXP (op, 0));
4300 return FALSE;
4303 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
4304 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
4305 Note that this disallows MEM(REG+REG), but allows
4306 MEM(PRE/POST_INC/DEC(REG)). */
4308 soft_df_operand (rtx op, enum machine_mode mode)
4310 if (s_register_operand (op, mode))
4311 return TRUE;
4313 if (mode != VOIDmode && GET_MODE (op) != mode)
4314 return FALSE;
4316 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4317 return FALSE;
4319 if (GET_CODE (op) == SUBREG)
4320 op = SUBREG_REG (op);
4322 switch (GET_CODE (op))
4324 case CONST_DOUBLE:
4325 return TRUE;
4327 case MEM:
4328 return memory_address_p (DFmode, XEXP (op, 0));
4330 default:
4331 return FALSE;
4335 /* Like soft_df_operand, but don't accept constants. */
4337 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4339 if (s_register_operand (op, mode))
4340 return TRUE;
4342 if (mode != VOIDmode && GET_MODE (op) != mode)
4343 return FALSE;
4345 if (GET_CODE (op) == SUBREG)
4346 op = SUBREG_REG (op);
4348 if (GET_CODE (op) == MEM)
4349 return memory_address_p (DFmode, XEXP (op, 0));
4350 return FALSE;
4353 /* Return TRUE for valid index operands. */
4355 index_operand (rtx op, enum machine_mode mode)
4357 return (s_register_operand (op, mode)
4358 || (immediate_operand (op, mode)
4359 && (GET_CODE (op) != CONST_INT
4360 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4363 /* Return TRUE for valid shifts by a constant. This also accepts any
4364 power of two on the (somewhat overly relaxed) assumption that the
4365 shift operator in this case was a mult. */
4367 const_shift_operand (rtx op, enum machine_mode mode)
4369 return (power_of_two_operand (op, mode)
4370 || (immediate_operand (op, mode)
4371 && (GET_CODE (op) != CONST_INT
4372 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4375 /* Return TRUE for arithmetic operators which can be combined with a multiply
4376 (shift). */
4378 shiftable_operator (rtx x, enum machine_mode mode)
4380 enum rtx_code code;
4382 if (GET_MODE (x) != mode)
4383 return FALSE;
4385 code = GET_CODE (x);
4387 return (code == PLUS || code == MINUS
4388 || code == IOR || code == XOR || code == AND);
4391 /* Return TRUE for binary logical operators. */
4393 logical_binary_operator (rtx x, enum machine_mode mode)
4395 enum rtx_code code;
4397 if (GET_MODE (x) != mode)
4398 return FALSE;
4400 code = GET_CODE (x);
4402 return (code == IOR || code == XOR || code == AND);
4405 /* Return TRUE for shift operators. */
4407 shift_operator (rtx x,enum machine_mode mode)
4409 enum rtx_code code;
4411 if (GET_MODE (x) != mode)
4412 return FALSE;
4414 code = GET_CODE (x);
4416 if (code == MULT)
4417 return power_of_two_operand (XEXP (x, 1), mode);
4419 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4420 || code == ROTATERT);
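/* A multiply by a power of two counts as a shift here because it can be
   encoded as one; e.g. (mult:SI (reg:SI 1) (const_int 8)) is emitted as
   a left shift by 3.  */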
4423 /* Return TRUE if x is EQ or NE. */
4425 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4427 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4430 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4432 arm_comparison_operator (rtx x, enum machine_mode mode)
4434 return (comparison_operator (x, mode)
4435 && GET_CODE (x) != LTGT
4436 && GET_CODE (x) != UNEQ);
4439 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4441 minmax_operator (rtx x, enum machine_mode mode)
4443 enum rtx_code code = GET_CODE (x);
4445 if (GET_MODE (x) != mode)
4446 return FALSE;
4448 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4451 /* Return TRUE if this is the condition code register; if we aren't given
4452 a mode, accept any register of class CCmode. */
4454 cc_register (rtx x, enum machine_mode mode)
4456 if (mode == VOIDmode)
4458 mode = GET_MODE (x);
4460 if (GET_MODE_CLASS (mode) != MODE_CC)
4461 return FALSE;
4464 if ( GET_MODE (x) == mode
4465 && GET_CODE (x) == REG
4466 && REGNO (x) == CC_REGNUM)
4467 return TRUE;
4469 return FALSE;
4472 /* Return TRUE if this is the condition code register; if we aren't given
4473 a mode, accept any register of class CCmode which indicates a dominance
4474 expression. */
4476 dominant_cc_register (rtx x, enum machine_mode mode)
4478 if (mode == VOIDmode)
4480 mode = GET_MODE (x);
4482 if (GET_MODE_CLASS (mode) != MODE_CC)
4483 return FALSE;
4486 if (mode != CC_DNEmode && mode != CC_DEQmode
4487 && mode != CC_DLEmode && mode != CC_DLTmode
4488 && mode != CC_DGEmode && mode != CC_DGTmode
4489 && mode != CC_DLEUmode && mode != CC_DLTUmode
4490 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4491 return FALSE;
4493 return cc_register (x, mode);
4496 /* Return TRUE if X references a SYMBOL_REF. */
4498 symbol_mentioned_p (rtx x)
4500 const char * fmt;
4501 int i;
4503 if (GET_CODE (x) == SYMBOL_REF)
4504 return 1;
4506 fmt = GET_RTX_FORMAT (GET_CODE (x));
4508 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4510 if (fmt[i] == 'E')
4512 int j;
4514 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4515 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4516 return 1;
4518 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4519 return 1;
4522 return 0;
4525 /* Return TRUE if X references a LABEL_REF. */
4527 label_mentioned_p (rtx x)
4529 const char * fmt;
4530 int i;
4532 if (GET_CODE (x) == LABEL_REF)
4533 return 1;
4535 fmt = GET_RTX_FORMAT (GET_CODE (x));
4536 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4538 if (fmt[i] == 'E')
4540 int j;
4542 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4543 if (label_mentioned_p (XVECEXP (x, i, j)))
4544 return 1;
4546 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
4547 return 1;
4550 return 0;
4553 enum rtx_code
4554 minmax_code (rtx x)
4556 enum rtx_code code = GET_CODE (x);
4558 if (code == SMAX)
4559 return GE;
4560 else if (code == SMIN)
4561 return LE;
4562 else if (code == UMIN)
4563 return LEU;
4564 else if (code == UMAX)
4565 return GEU;
4567 abort ();
4570 /* Return 1 if memory locations are adjacent. */
4572 adjacent_mem_locations (rtx a, rtx b)
4574 if ((GET_CODE (XEXP (a, 0)) == REG
4575 || (GET_CODE (XEXP (a, 0)) == PLUS
4576 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
4577 && (GET_CODE (XEXP (b, 0)) == REG
4578 || (GET_CODE (XEXP (b, 0)) == PLUS
4579 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
4581 int val0 = 0, val1 = 0;
4582 int reg0, reg1;
4584 if (GET_CODE (XEXP (a, 0)) == PLUS)
4586 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
4587 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
4589 else
4590 reg0 = REGNO (XEXP (a, 0));
4592 if (GET_CODE (XEXP (b, 0)) == PLUS)
4594 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
4595 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
4597 else
4598 reg1 = REGNO (XEXP (b, 0));
4600 /* Don't accept any offset that will require multiple
4601 instructions to handle, since this would cause the
4602 arith_adjacentmem pattern to output an overlong sequence. */
4603 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
4604 return 0;
4606 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
4608 return 0;
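/* For example, (mem:SI (reg 4)) and (mem:SI (plus (reg 4) (const_int 4)))
   are adjacent, as are offsets #8 and #4 from the same base; all that is
   required is the same base register and offsets differing by exactly 4.  */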
4611 /* Return 1 if OP is a load multiple operation. It is known to be
4612 parallel and the first section will be tested. */
4614 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4616 HOST_WIDE_INT count = XVECLEN (op, 0);
4617 int dest_regno;
4618 rtx src_addr;
4619 HOST_WIDE_INT i = 1, base = 0;
4620 rtx elt;
4622 if (count <= 1
4623 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4624 return 0;
4626 /* Check to see if this might be a write-back. */
4627 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4629 i++;
4630 base = 1;
4632 /* Now check it more carefully. */
4633 if (GET_CODE (SET_DEST (elt)) != REG
4634 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4635 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4636 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4637 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4638 return 0;
4641 /* Perform a quick check so we don't blow up below. */
4642 if (count <= i
4643 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4644 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
4645 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
4646 return 0;
4648 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
4649 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
4651 for (; i < count; i++)
4653 elt = XVECEXP (op, 0, i);
4655 if (GET_CODE (elt) != SET
4656 || GET_CODE (SET_DEST (elt)) != REG
4657 || GET_MODE (SET_DEST (elt)) != SImode
4658 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
4659 || GET_CODE (SET_SRC (elt)) != MEM
4660 || GET_MODE (SET_SRC (elt)) != SImode
4661 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
4662 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
4663 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
4664 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
4665 return 0;
4668 return 1;
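/* A minimal example of the PARALLEL shape accepted above (no write-back):

   (parallel [(set (reg:SI 4) (mem:SI (reg:SI 0)))
              (set (reg:SI 5) (mem:SI (plus:SI (reg:SI 0) (const_int 4))))])

   i.e. consecutively numbered destination registers loaded from
   consecutive words at the same base register.  */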
4671 /* Return 1 if OP is a store multiple operation. It is known to be
4672 parallel and the first section will be tested. */
4674 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4676 HOST_WIDE_INT count = XVECLEN (op, 0);
4677 int src_regno;
4678 rtx dest_addr;
4679 HOST_WIDE_INT i = 1, base = 0;
4680 rtx elt;
4682 if (count <= 1
4683 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4684 return 0;
4686 /* Check to see if this might be a write-back. */
4687 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4689 i++;
4690 base = 1;
4692 /* Now check it more carefully. */
4693 if (GET_CODE (SET_DEST (elt)) != REG
4694 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4695 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4696 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4697 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4698 return 0;
4701 /* Perform a quick check so we don't blow up below. */
4702 if (count <= i
4703 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4704 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
4705 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
4706 return 0;
4708 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
4709 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
4711 for (; i < count; i++)
4713 elt = XVECEXP (op, 0, i);
4715 if (GET_CODE (elt) != SET
4716 || GET_CODE (SET_SRC (elt)) != REG
4717 || GET_MODE (SET_SRC (elt)) != SImode
4718 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
4719 || GET_CODE (SET_DEST (elt)) != MEM
4720 || GET_MODE (SET_DEST (elt)) != SImode
4721 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
4722 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
4723 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
4724 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
4725 return 0;
4728 return 1;
4732 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4733 HOST_WIDE_INT *load_offset)
4735 int unsorted_regs[4];
4736 HOST_WIDE_INT unsorted_offsets[4];
4737 int order[4];
4738 int base_reg = -1;
4739 int i;
4741 /* Can only handle 2, 3, or 4 insns at present,
4742 though could be easily extended if required. */
4743 if (nops < 2 || nops > 4)
4744 abort ();
4746 /* Loop over the operands and check that the memory references are
4747 suitable (i.e. immediate offsets from the same base register). At
4748 the same time, extract the target register, and the memory
4749 offsets. */
4750 for (i = 0; i < nops; i++)
4752 rtx reg;
4753 rtx offset;
4755 /* Convert a subreg of a mem into the mem itself. */
4756 if (GET_CODE (operands[nops + i]) == SUBREG)
4757 operands[nops + i] = alter_subreg (operands + (nops + i));
4759 if (GET_CODE (operands[nops + i]) != MEM)
4760 abort ();
4762 /* Don't reorder volatile memory references; it doesn't seem worth
4763 looking for the case where the order is ok anyway. */
4764 if (MEM_VOLATILE_P (operands[nops + i]))
4765 return 0;
4767 offset = const0_rtx;
4769 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4770 || (GET_CODE (reg) == SUBREG
4771 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4772 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4773 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4774 == REG)
4775 || (GET_CODE (reg) == SUBREG
4776 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4777 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4778 == CONST_INT)))
4780 if (i == 0)
4782 base_reg = REGNO (reg);
4783 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4784 ? REGNO (operands[i])
4785 : REGNO (SUBREG_REG (operands[i])));
4786 order[0] = 0;
4788 else
4790 if (base_reg != (int) REGNO (reg))
4791 /* Not addressed from the same base register. */
4792 return 0;
4794 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4795 ? REGNO (operands[i])
4796 : REGNO (SUBREG_REG (operands[i])));
4797 if (unsorted_regs[i] < unsorted_regs[order[0]])
4798 order[0] = i;
4801 /* If it isn't an integer register, or if it overwrites the
4802 base register but isn't the last insn in the list, then
4803 we can't do this. */
4804 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
4805 || (i != nops - 1 && unsorted_regs[i] == base_reg))
4806 return 0;
4808 unsorted_offsets[i] = INTVAL (offset);
4810 else
4811 /* Not a suitable memory address. */
4812 return 0;
4815 /* All the useful information has now been extracted from the
4816 operands into unsorted_regs and unsorted_offsets; additionally,
4817 order[0] has been set to the lowest numbered register in the
4818 list. Sort the registers into order, and check that the memory
4819 offsets are ascending and adjacent. */
4821 for (i = 1; i < nops; i++)
4823 int j;
4825 order[i] = order[i - 1];
4826 for (j = 0; j < nops; j++)
4827 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4828 && (order[i] == order[i - 1]
4829 || unsorted_regs[j] < unsorted_regs[order[i]]))
4830 order[i] = j;
4832 /* Have we found a suitable register? If not, one must be used more
4833 than once. */
4834 if (order[i] == order[i - 1])
4835 return 0;
4837 /* Is the memory address adjacent and ascending? */
4838 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4839 return 0;
4842 if (base)
4844 *base = base_reg;
4846 for (i = 0; i < nops; i++)
4847 regs[i] = unsorted_regs[order[i]];
4849 *load_offset = unsorted_offsets[order[0]];
4852 if (unsorted_offsets[order[0]] == 0)
4853 return 1; /* ldmia */
4855 if (unsorted_offsets[order[0]] == 4)
4856 return 2; /* ldmib */
4858 if (unsorted_offsets[order[nops - 1]] == 0)
4859 return 3; /* ldmda */
4861 if (unsorted_offsets[order[nops - 1]] == -4)
4862 return 4; /* ldmdb */
4864 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
4865 if the offset isn't small enough. The reason 2 ldrs are faster
4866 is because these ARMs are able to do more than one cache access
4867 in a single cycle. The ARM9 and StrongARM have Harvard caches,
4868 whilst the ARM8 has a double bandwidth cache. This means that
4869 these cores can do both an instruction fetch and a data fetch in
4870 a single cycle, so the trick of calculating the address into a
4871 scratch register (one of the result regs) and then doing a load
4872 multiple actually becomes slower (and no smaller in code size).
4873 That is the transformation
4875 ldr rd1, [rbase + offset]
4876 ldr rd2, [rbase + offset + 4]
4878 to
4880 add rd1, rbase, offset
4881 ldmia rd1, {rd1, rd2}
4883 produces worse code -- '3 cycles + any stalls on rd2' instead of
4884 '2 cycles + any stalls on rd2'. On ARMs with only one cache
4885 access per cycle, the first sequence could never complete in less
4886 than 6 cycles, whereas the ldm sequence would only take 5 and
4887 would make better use of sequential accesses if not hitting the
4888 cache.
4890 We cheat here and test 'arm_ld_sched' which we currently know to
4891 only be true for the ARM8, ARM9 and StrongARM. If this ever
4892 changes, then the test below needs to be reworked. */
4893 if (nops == 2 && arm_ld_sched)
4894 return 0;
4896 /* Can't do it without setting up the offset, only do this if it takes
4897 no more than one insn. */
4898 return (const_ok_for_arm (unsorted_offsets[order[0]])
4899 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
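/* As a worked example: loads of r4 from [r1, #0] and r5 from [r1, #4]
   give sorted offsets {0, 4}, so this function returns 1 and
   emit_ldm_seq below emits "ldm%?ia" -- essentially "ldmia r1, {r4, r5}"
   once the condition hook is expanded.  */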
4902 const char *
4903 emit_ldm_seq (rtx *operands, int nops)
4905 int regs[4];
4906 int base_reg;
4907 HOST_WIDE_INT offset;
4908 char buf[100];
4909 int i;
4911 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4913 case 1:
4914 strcpy (buf, "ldm%?ia\t");
4915 break;
4917 case 2:
4918 strcpy (buf, "ldm%?ib\t");
4919 break;
4921 case 3:
4922 strcpy (buf, "ldm%?da\t");
4923 break;
4925 case 4:
4926 strcpy (buf, "ldm%?db\t");
4927 break;
4929 case 5:
4930 if (offset >= 0)
4931 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4932 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4933 (long) offset);
4934 else
4935 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4936 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4937 (long) -offset);
4938 output_asm_insn (buf, operands);
4939 base_reg = regs[0];
4940 strcpy (buf, "ldm%?ia\t");
4941 break;
4943 default:
4944 abort ();
4947 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4948 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4950 for (i = 1; i < nops; i++)
4951 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4952 reg_names[regs[i]]);
4954 strcat (buf, "}\t%@ phole ldm");
4956 output_asm_insn (buf, operands);
4957 return "";
4961 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4962 HOST_WIDE_INT * load_offset)
4964 int unsorted_regs[4];
4965 HOST_WIDE_INT unsorted_offsets[4];
4966 int order[4];
4967 int base_reg = -1;
4968 int i;
4970 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4971 extended if required. */
4972 if (nops < 2 || nops > 4)
4973 abort ();
4975 /* Loop over the operands and check that the memory references are
4976 suitable (i.e. immediate offsets from the same base register). At
4977 the same time, extract the target register, and the memory
4978 offsets. */
4979 for (i = 0; i < nops; i++)
4981 rtx reg;
4982 rtx offset;
4984 /* Convert a subreg of a mem into the mem itself. */
4985 if (GET_CODE (operands[nops + i]) == SUBREG)
4986 operands[nops + i] = alter_subreg (operands + (nops + i));
4988 if (GET_CODE (operands[nops + i]) != MEM)
4989 abort ();
4991 /* Don't reorder volatile memory references; it doesn't seem worth
4992 looking for the case where the order is ok anyway. */
4993 if (MEM_VOLATILE_P (operands[nops + i]))
4994 return 0;
4996 offset = const0_rtx;
4998 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4999 || (GET_CODE (reg) == SUBREG
5000 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5001 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5002 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5003 == REG)
5004 || (GET_CODE (reg) == SUBREG
5005 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5006 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5007 == CONST_INT)))
5009 if (i == 0)
5011 base_reg = REGNO (reg);
5012 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5013 ? REGNO (operands[i])
5014 : REGNO (SUBREG_REG (operands[i])));
5015 order[0] = 0;
5017 else
5019 if (base_reg != (int) REGNO (reg))
5020 /* Not addressed from the same base register. */
5021 return 0;
5023 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5024 ? REGNO (operands[i])
5025 : REGNO (SUBREG_REG (operands[i])));
5026 if (unsorted_regs[i] < unsorted_regs[order[0]])
5027 order[0] = i;
5030 /* If it isn't an integer register, then we can't do this. */
5031 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5032 return 0;
5034 unsorted_offsets[i] = INTVAL (offset);
5036 else
5037 /* Not a suitable memory address. */
5038 return 0;
5041 /* All the useful information has now been extracted from the
5042 operands into unsorted_regs and unsorted_offsets; additionally,
5043 order[0] has been set to the lowest numbered register in the
5044 list. Sort the registers into order, and check that the memory
5045 offsets are ascending and adjacent. */
5047 for (i = 1; i < nops; i++)
5049 int j;
5051 order[i] = order[i - 1];
5052 for (j = 0; j < nops; j++)
5053 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5054 && (order[i] == order[i - 1]
5055 || unsorted_regs[j] < unsorted_regs[order[i]]))
5056 order[i] = j;
5058 /* Have we found a suitable register? If not, one must be used more
5059 than once. */
5060 if (order[i] == order[i - 1])
5061 return 0;
5063 /* Is the memory address adjacent and ascending? */
5064 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5065 return 0;
5068 if (base)
5070 *base = base_reg;
5072 for (i = 0; i < nops; i++)
5073 regs[i] = unsorted_regs[order[i]];
5075 *load_offset = unsorted_offsets[order[0]];
5078 if (unsorted_offsets[order[0]] == 0)
5079 return 1; /* stmia */
5081 if (unsorted_offsets[order[0]] == 4)
5082 return 2; /* stmib */
5084 if (unsorted_offsets[order[nops - 1]] == 0)
5085 return 3; /* stmda */
5087 if (unsorted_offsets[order[nops - 1]] == -4)
5088 return 4; /* stmdb */
5090 return 0;
5093 const char *
5094 emit_stm_seq (rtx *operands, int nops)
5096 int regs[4];
5097 int base_reg;
5098 HOST_WIDE_INT offset;
5099 char buf[100];
5100 int i;
5102 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5104 case 1:
5105 strcpy (buf, "stm%?ia\t");
5106 break;
5108 case 2:
5109 strcpy (buf, "stm%?ib\t");
5110 break;
5112 case 3:
5113 strcpy (buf, "stm%?da\t");
5114 break;
5116 case 4:
5117 strcpy (buf, "stm%?db\t");
5118 break;
5120 default:
5121 abort ();
5124 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5125 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5127 for (i = 1; i < nops; i++)
5128 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5129 reg_names[regs[i]]);
5131 strcat (buf, "}\t%@ phole stm");
5133 output_asm_insn (buf, operands);
5134 return "";
5138 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5140 if (GET_CODE (op) != PARALLEL
5141 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5142 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5143 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5144 return 0;
5146 return 1;
5149 /* Routines for use in generating RTL. */
5152 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5153 int write_back, int unchanging_p, int in_struct_p,
5154 int scalar_p)
5156 int i = 0, j;
5157 rtx result;
5158 int sign = up ? 1 : -1;
5159 rtx mem;
5161 /* XScale has load-store double instructions, but they have stricter
5162 alignment requirements than load-store multiple, so we cannot
5163 use them.
5165 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5166 the pipeline until completion.
5168 NREGS CYCLES
5169 1 3
5170 2 4
5171 3 5
5172 4 6
5174 An ldr instruction takes 1-3 cycles, but does not block the
5175 pipeline.
5177 NREGS CYCLES
5178 1 1-3
5179 2 2-6
5180 3 3-9
5181 4 4-12
5183 Best case ldr will always win. However, the more ldr instructions
5184 we issue, the less likely we are to be able to schedule them well.
5185 Using ldr instructions also increases code size.
5187 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5188 for counts of 3 or 4 regs. */
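/* For instance, by the 2 + NREGS rule above an XScale ldm of two
   registers holds the pipeline for 4 cycles, whereas two separate ldr
   insns take 1-3 cycles each but can overlap with other work.  */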
5189 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5191 rtx seq;
5193 start_sequence ();
5195 for (i = 0; i < count; i++)
5197 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5198 RTX_UNCHANGING_P (mem) = unchanging_p;
5199 MEM_IN_STRUCT_P (mem) = in_struct_p;
5200 MEM_SCALAR_P (mem) = scalar_p;
5201 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5204 if (write_back)
5205 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5207 seq = get_insns ();
5208 end_sequence ();
5210 return seq;
5213 result = gen_rtx_PARALLEL (VOIDmode,
5214 rtvec_alloc (count + (write_back ? 1 : 0)));
5215 if (write_back)
5217 XVECEXP (result, 0, 0)
5218 = gen_rtx_SET (GET_MODE (from), from,
5219 plus_constant (from, count * 4 * sign));
5220 i = 1;
5221 count++;
5224 for (j = 0; i < count; i++, j++)
5226 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5227 RTX_UNCHANGING_P (mem) = unchanging_p;
5228 MEM_IN_STRUCT_P (mem) = in_struct_p;
5229 MEM_SCALAR_P (mem) = scalar_p;
5230 XVECEXP (result, 0, i)
5231 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5234 return result;
5238 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5239 int write_back, int unchanging_p, int in_struct_p,
5240 int scalar_p)
5242 int i = 0, j;
5243 rtx result;
5244 int sign = up ? 1 : -1;
5245 rtx mem;
5247 /* See arm_gen_load_multiple for discussion of
5248 the pros/cons of ldm/stm usage for XScale. */
5249 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5251 rtx seq;
5253 start_sequence ();
5255 for (i = 0; i < count; i++)
5257 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5258 RTX_UNCHANGING_P (mem) = unchanging_p;
5259 MEM_IN_STRUCT_P (mem) = in_struct_p;
5260 MEM_SCALAR_P (mem) = scalar_p;
5261 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5264 if (write_back)
5265 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5267 seq = get_insns ();
5268 end_sequence ();
5270 return seq;
5273 result = gen_rtx_PARALLEL (VOIDmode,
5274 rtvec_alloc (count + (write_back ? 1 : 0)));
5275 if (write_back)
5277 XVECEXP (result, 0, 0)
5278 = gen_rtx_SET (GET_MODE (to), to,
5279 plus_constant (to, count * 4 * sign));
5280 i = 1;
5281 count++;
5284 for (j = 0; i < count; i++, j++)
5286 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5287 RTX_UNCHANGING_P (mem) = unchanging_p;
5288 MEM_IN_STRUCT_P (mem) = in_struct_p;
5289 MEM_SCALAR_P (mem) = scalar_p;
5291 XVECEXP (result, 0, i)
5292 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5295 return result;
5299 arm_gen_movstrqi (rtx *operands)
5301 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5302 int i;
5303 rtx src, dst;
5304 rtx st_src, st_dst, fin_src, fin_dst;
5305 rtx part_bytes_reg = NULL;
5306 rtx mem;
5307 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5308 int dst_scalar_p, src_scalar_p;
5310 if (GET_CODE (operands[2]) != CONST_INT
5311 || GET_CODE (operands[3]) != CONST_INT
5312 || INTVAL (operands[2]) > 64
5313 || INTVAL (operands[3]) & 3)
5314 return 0;
5316 st_dst = XEXP (operands[0], 0);
5317 st_src = XEXP (operands[1], 0);
5319 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5320 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5321 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5322 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5323 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5324 src_scalar_p = MEM_SCALAR_P (operands[1]);
5326 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5327 fin_src = src = copy_to_mode_reg (SImode, st_src);
5329 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5330 out_words_to_go = INTVAL (operands[2]) / 4;
5331 last_bytes = INTVAL (operands[2]) & 3;
5333 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5334 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5336 for (i = 0; in_words_to_go >= 2; i+=4)
5338 if (in_words_to_go > 4)
5339 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5340 src_unchanging_p,
5341 src_in_struct_p,
5342 src_scalar_p));
5343 else
5344 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5345 FALSE, src_unchanging_p,
5346 src_in_struct_p, src_scalar_p));
5348 if (out_words_to_go)
5350 if (out_words_to_go > 4)
5351 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5352 dst_unchanging_p,
5353 dst_in_struct_p,
5354 dst_scalar_p));
5355 else if (out_words_to_go != 1)
5356 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5357 dst, TRUE,
5358 (last_bytes == 0
5359 ? FALSE : TRUE),
5360 dst_unchanging_p,
5361 dst_in_struct_p,
5362 dst_scalar_p));
5363 else
5365 mem = gen_rtx_MEM (SImode, dst);
5366 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5367 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5368 MEM_SCALAR_P (mem) = dst_scalar_p;
5369 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5370 if (last_bytes != 0)
5371 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5375 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5376 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5379 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5380 if (out_words_to_go)
5382 rtx sreg;
5384 mem = gen_rtx_MEM (SImode, src);
5385 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5386 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5387 MEM_SCALAR_P (mem) = src_scalar_p;
5388 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5389 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5391 mem = gen_rtx_MEM (SImode, dst);
5392 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5393 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5394 MEM_SCALAR_P (mem) = dst_scalar_p;
5395 emit_move_insn (mem, sreg);
5396 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5397 in_words_to_go--;
5399 if (in_words_to_go) /* Sanity check */
5400 abort ();
5403 if (in_words_to_go)
5405 if (in_words_to_go < 0)
5406 abort ();
5408 mem = gen_rtx_MEM (SImode, src);
5409 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5410 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5411 MEM_SCALAR_P (mem) = src_scalar_p;
5412 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5415 if (last_bytes && part_bytes_reg == NULL)
5416 abort ();
5418 if (BYTES_BIG_ENDIAN && last_bytes)
5420 rtx tmp = gen_reg_rtx (SImode);
5422 /* The bytes we want are in the top end of the word. */
5423 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5424 GEN_INT (8 * (4 - last_bytes))));
5425 part_bytes_reg = tmp;
5427 while (last_bytes)
5429 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5430 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5431 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5432 MEM_SCALAR_P (mem) = dst_scalar_p;
5433 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5435 if (--last_bytes)
5437 tmp = gen_reg_rtx (SImode);
5438 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5439 part_bytes_reg = tmp;
5444 else
5446 if (last_bytes > 1)
5448 mem = gen_rtx_MEM (HImode, dst);
5449 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5450 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5451 MEM_SCALAR_P (mem) = dst_scalar_p;
5452 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5453 last_bytes -= 2;
5454 if (last_bytes)
5456 rtx tmp = gen_reg_rtx (SImode);
5458 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
5459 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5460 part_bytes_reg = tmp;
5464 if (last_bytes)
5466 mem = gen_rtx_MEM (QImode, dst);
5467 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5468 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5469 MEM_SCALAR_P (mem) = dst_scalar_p;
5470 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5474 return 1;
5477 /* Generate a memory reference for a half word, such that it will be loaded
5478 into the top 16 bits of the word. We can assume that the address is
5479 known to be alignable and of the form reg, or plus (reg, const). */
5482 arm_gen_rotated_half_load (rtx memref)
5484 HOST_WIDE_INT offset = 0;
5485 rtx base = XEXP (memref, 0);
5487 if (GET_CODE (base) == PLUS)
5489 offset = INTVAL (XEXP (base, 1));
5490 base = XEXP (base, 0);
5493 /* If we aren't allowed to generate unaligned addresses, then fail. */
5494 if (TARGET_MMU_TRAPS
5495 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5496 return NULL;
5498 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5500 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5501 return base;
5503 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
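/* Example (little-endian): the halfword at [r0, #2] already occupies the
   top 16 bits of the word at [r0, #0], so the SImode load is returned
   as-is; for the halfword at [r0, #0] the load is wrapped in a rotate by
   16 to move it to the top.  */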
5506 /* Select a dominance comparison mode if possible for a test of the general
5507 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5508 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5509 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5510 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5511 In all cases OP will be either EQ or NE, but we don't need to know which
5512 here. If we are unable to support a dominance comparison, we return
5513 CCmode. This will then fail to match for the RTL expressions that
5514 generate this call. */
5515 enum machine_mode
5516 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5518 enum rtx_code cond1, cond2;
5519 int swapped = 0;
5521 /* Currently we will probably get the wrong result if the individual
5522 comparisons are not simple. This also ensures that it is safe to
5523 reverse a comparison if necessary. */
5524 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5525 != CCmode)
5526 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5527 != CCmode))
5528 return CCmode;
5530 /* The if_then_else variant of this tests the second condition if the
5531 first passes, but is true if the first fails. Reverse the first
5532 condition to get a true "inclusive-or" expression. */
5533 if (cond_or == DOM_CC_NX_OR_Y)
5534 cond1 = reverse_condition (cond1);
5536 /* If the comparisons are not equal, and one doesn't dominate the other,
5537 then we can't do this. */
5538 if (cond1 != cond2
5539 && !comparison_dominates_p (cond1, cond2)
5540 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5541 return CCmode;
5543 if (swapped)
5545 enum rtx_code temp = cond1;
5546 cond1 = cond2;
5547 cond2 = temp;
5550 switch (cond1)
5552 case EQ:
5553 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5554 return CC_DEQmode;
5556 switch (cond2)
5558 case LE: return CC_DLEmode;
5559 case LEU: return CC_DLEUmode;
5560 case GE: return CC_DGEmode;
5561 case GEU: return CC_DGEUmode;
5562 default: break;
5565 break;
5567 case LT:
5568 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5569 return CC_DLTmode;
5570 if (cond2 == LE)
5571 return CC_DLEmode;
5572 if (cond2 == NE)
5573 return CC_DNEmode;
5574 break;
5576 case GT:
5577 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5578 return CC_DGTmode;
5579 if (cond2 == GE)
5580 return CC_DGEmode;
5581 if (cond2 == NE)
5582 return CC_DNEmode;
5583 break;
5585 case LTU:
5586 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5587 return CC_DLTUmode;
5588 if (cond2 == LEU)
5589 return CC_DLEUmode;
5590 if (cond2 == NE)
5591 return CC_DNEmode;
5592 break;
5594 case GTU:
5595 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5596 return CC_DGTUmode;
5597 if (cond2 == GEU)
5598 return CC_DGEUmode;
5599 if (cond2 == NE)
5600 return CC_DNEmode;
5601 break;
5603 /* The remaining cases only occur when both comparisons are the
5604 same. */
5605 case NE:
5606 return CC_DNEmode;
5608 case LE:
5609 return CC_DLEmode;
5611 case GE:
5612 return CC_DGEmode;
5614 case LEU:
5615 return CC_DLEUmode;
5617 case GEU:
5618 return CC_DGEUmode;
5620 default:
5621 break;
5624 abort ();
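/* For example, combining (eq x y) with (eq a b) under DOM_CC_X_AND_Y
   yields CC_DEQmode, and (lt x y) with (le x y) under DOM_CC_X_OR_Y
   yields CC_DLEmode, since LT dominates LE.  */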
5627 enum machine_mode
5628 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
5630 /* All floating-point compares return CCFPmode if it is an equality
5631 comparison, and CCFPEmode otherwise. */
5632 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5634 switch (op)
5636 case EQ:
5637 case NE:
5638 case UNORDERED:
5639 case ORDERED:
5640 case UNLT:
5641 case UNLE:
5642 case UNGT:
5643 case UNGE:
5644 case UNEQ:
5645 case LTGT:
5646 return CCFPmode;
5648 case LT:
5649 case LE:
5650 case GT:
5651 case GE:
5652 if (TARGET_CIRRUS)
5653 return CCFPmode;
5654 return CCFPEmode;
5656 default:
5657 abort ();
5661 /* A compare with a shifted operand. Because of canonicalization, the
5662 comparison will have to be swapped when we emit the assembler. */
5663 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
5664 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5665 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
5666 || GET_CODE (x) == ROTATERT))
5667 return CC_SWPmode;
5669 /* This is a special case that is used by combine to allow a
5670 comparison of a shifted byte load to be split into a zero-extend
5671 followed by a comparison of the shifted integer (only valid for
5672 equalities and unsigned inequalities). */
5673 if (GET_MODE (x) == SImode
5674 && GET_CODE (x) == ASHIFT
5675 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
5676 && GET_CODE (XEXP (x, 0)) == SUBREG
5677 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
5678 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
5679 && (op == EQ || op == NE
5680 || op == GEU || op == GTU || op == LTU || op == LEU)
5681 && GET_CODE (y) == CONST_INT)
5682 return CC_Zmode;
5684 /* A construct for a conditional compare: if the false arm contains
5685 0, then both conditions must be true; otherwise either condition
5686 must be true. Not all conditions are possible, so CCmode is
5687 returned if it can't be done. */
5688 if (GET_CODE (x) == IF_THEN_ELSE
5689 && (XEXP (x, 2) == const0_rtx
5690 || XEXP (x, 2) == const1_rtx)
5691 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5692 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5693 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5694 INTVAL (XEXP (x, 2)));
5696 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
5697 if (GET_CODE (x) == AND
5698 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5699 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5700 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5701 DOM_CC_X_AND_Y);
5703 if (GET_CODE (x) == IOR
5704 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5705 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5706 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5707 DOM_CC_X_OR_Y);
5709 /* An operation (on Thumb) where we want to test for a single bit.
5710 This is done by shifting that bit up into the top bit of a
5711 scratch register; we can then branch on the sign bit. */
5712 if (TARGET_THUMB
5713 && GET_MODE (x) == SImode
5714 && (op == EQ || op == NE)
5715 && (GET_CODE (x) == ZERO_EXTRACT))
5716 return CC_Nmode;
5718 /* For an operation that sets the condition codes as a side-effect, the
5719 V flag is not set correctly, so we can only use comparisons where
5720 this doesn't matter. (For LT and GE we can use "mi" and "pl"
5721 instead.) */
5722 if (GET_MODE (x) == SImode
5723 && y == const0_rtx
5724 && (op == EQ || op == NE || op == LT || op == GE)
5725 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
5726 || GET_CODE (x) == AND || GET_CODE (x) == IOR
5727 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
5728 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
5729 || GET_CODE (x) == LSHIFTRT
5730 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5731 || GET_CODE (x) == ROTATERT
5732 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
5733 return CC_NOOVmode;
5735 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
5736 return CC_Zmode;
5738 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
5739 && GET_CODE (x) == PLUS
5740 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
5741 return CC_Cmode;
5743 return CCmode;
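/* For example, comparing (plus:SI (reg a) (reg b)) against (reg b) with
   GEU selects CC_Cmode above: an unsigned comparison of a sum against
   one of its operands is simply a test of the carry flag from the
   addition.  */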
5746 /* X and Y are two things to compare using CODE. Emit the compare insn and
5747 return the rtx for the CC register in the proper mode. */
5750 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
5752 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
5753 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
5755 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
5756 gen_rtx_COMPARE (mode, x, y)));
5758 return cc_reg;
5761 /* Generate a sequence of insns that will generate the correct return
5762 address mask depending on the physical architecture that the program
5763 is running on. */
5765 arm_gen_return_addr_mask (void)
5767 rtx reg = gen_reg_rtx (Pmode);
5769 emit_insn (gen_return_addr_mask (reg));
5770 return reg;
5773 void
5774 arm_reload_in_hi (rtx *operands)
5776 rtx ref = operands[1];
5777 rtx base, scratch;
5778 HOST_WIDE_INT offset = 0;
5780 if (GET_CODE (ref) == SUBREG)
5782 offset = SUBREG_BYTE (ref);
5783 ref = SUBREG_REG (ref);
5786 if (GET_CODE (ref) == REG)
5788 /* We have a pseudo which has been spilt onto the stack; there
5789 are two cases here: the first where there is a simple
5790 stack-slot replacement and a second where the stack-slot is
5791 out of range, or is used as a subreg. */
5792 if (reg_equiv_mem[REGNO (ref)])
5794 ref = reg_equiv_mem[REGNO (ref)];
5795 base = find_replacement (&XEXP (ref, 0));
5797 else
5798 /* The slot is out of range, or was dressed up in a SUBREG. */
5799 base = reg_equiv_address[REGNO (ref)];
5801 else
5802 base = find_replacement (&XEXP (ref, 0));
5804 /* Handle the case where the address is too complex to be offset by 1. */
5805 if (GET_CODE (base) == MINUS
5806 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5808 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5810 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5811 base = base_plus;
5813 else if (GET_CODE (base) == PLUS)
5815 /* The addend must be CONST_INT, or we would have dealt with it above. */
5816 HOST_WIDE_INT hi, lo;
5818 offset += INTVAL (XEXP (base, 1));
5819 base = XEXP (base, 0);
5821 /* Rework the address into a legal sequence of insns. */
5822 /* Valid range for lo is -4095 -> 4095 */
5823 lo = (offset >= 0
5824 ? (offset & 0xfff)
5825 : -((-offset) & 0xfff));
5827 /* Corner case: if lo is the max offset then we would be out of range
5828 once we have added the additional 1 below, so bump the msb into the
5829 pre-loading insn(s). */
5830 if (lo == 4095)
5831 lo &= 0x7ff;
5833 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5834 ^ (HOST_WIDE_INT) 0x80000000)
5835 - (HOST_WIDE_INT) 0x80000000);
5837 if (hi + lo != offset)
5838 abort ();
5840 if (hi != 0)
5842 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5844 /* Get the base address; addsi3 knows how to handle constants
5845 that require more than one insn. */
5846 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5847 base = base_plus;
5848 offset = lo;
5852 /* Operands[2] may overlap operands[0] (though it won't overlap
5853 operands[1]); that's why we asked for a DImode reg -- so we can
5854 use the bit that does not overlap. */
5855 if (REGNO (operands[2]) == REGNO (operands[0]))
5856 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5857 else
5858 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5860 emit_insn (gen_zero_extendqisi2 (scratch,
5861 gen_rtx_MEM (QImode,
5862 plus_constant (base,
5863 offset))));
5864 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
5865 gen_rtx_MEM (QImode,
5866 plus_constant (base,
5867 offset + 1))));
5868 if (!BYTES_BIG_ENDIAN)
5869 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5870 gen_rtx_IOR (SImode,
5871 gen_rtx_ASHIFT
5872 (SImode,
5873 gen_rtx_SUBREG (SImode, operands[0], 0),
5874 GEN_INT (8)),
5875 scratch)));
5876 else
5877 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5878 gen_rtx_IOR (SImode,
5879 gen_rtx_ASHIFT (SImode, scratch,
5880 GEN_INT (8)),
5881 gen_rtx_SUBREG (SImode, operands[0],
5882 0))));
5885 /* Handle storing a half-word to memory during reload by synthesizing it as two
5886 byte stores. Take care not to clobber the input values until after we
5887 have moved them somewhere safe. This code assumes that if the DImode
5888 scratch in operands[2] overlaps either the input value or output address
5889 in some way, then that value must die in this insn (we absolutely need
5890 two scratch registers for some corner cases). */
5891 void
5892 arm_reload_out_hi (rtx *operands)
5894 rtx ref = operands[0];
5895 rtx outval = operands[1];
5896 rtx base, scratch;
5897 HOST_WIDE_INT offset = 0;
5899 if (GET_CODE (ref) == SUBREG)
5901 offset = SUBREG_BYTE (ref);
5902 ref = SUBREG_REG (ref);
5905 if (GET_CODE (ref) == REG)
5907 /* We have a pseudo which has been spilt onto the stack; there
5908 are two cases here: the first where there is a simple
5909 stack-slot replacement and a second where the stack-slot is
5910 out of range, or is used as a subreg. */
5911 if (reg_equiv_mem[REGNO (ref)])
5913 ref = reg_equiv_mem[REGNO (ref)];
5914 base = find_replacement (&XEXP (ref, 0));
5916 else
5917 /* The slot is out of range, or was dressed up in a SUBREG. */
5918 base = reg_equiv_address[REGNO (ref)];
5920 else
5921 base = find_replacement (&XEXP (ref, 0));
5923 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5925 /* Handle the case where the address is too complex to be offset by 1. */
5926 if (GET_CODE (base) == MINUS
5927 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5929 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5931 /* Be careful not to destroy OUTVAL. */
5932 if (reg_overlap_mentioned_p (base_plus, outval))
5934 /* Updating base_plus might destroy outval, see if we can
5935 swap the scratch and base_plus. */
5936 if (!reg_overlap_mentioned_p (scratch, outval))
5938 rtx tmp = scratch;
5939 scratch = base_plus;
5940 base_plus = tmp;
5942 else
5944 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5946 /* Be conservative and copy OUTVAL into the scratch now,
5947 this should only be necessary if outval is a subreg
5948 of something larger than a word. */
5949 /* XXX Might this clobber base? I can't see how it can,
5950 since scratch is known to overlap with OUTVAL, and
5951 must be wider than a word. */
5952 emit_insn (gen_movhi (scratch_hi, outval));
5953 outval = scratch_hi;
5957 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5958 base = base_plus;
5960 else if (GET_CODE (base) == PLUS)
5962 /* The addend must be CONST_INT, or we would have dealt with it above. */
5963 HOST_WIDE_INT hi, lo;
5965 offset += INTVAL (XEXP (base, 1));
5966 base = XEXP (base, 0);
5968 /* Rework the address into a legal sequence of insns. */
5969 /* Valid range for lo is -4095 -> 4095. */
5970 lo = (offset >= 0
5971 ? (offset & 0xfff)
5972 : -((-offset) & 0xfff));
5974 /* Corner case: if lo is the max offset then we would be out of range
5975 once we have added the additional 1 below, so bump the msb into the
5976 pre-loading insn(s). */
5977 if (lo == 4095)
5978 lo &= 0x7ff;
5980 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5981 ^ (HOST_WIDE_INT) 0x80000000)
5982 - (HOST_WIDE_INT) 0x80000000);
5984 if (hi + lo != offset)
5985 abort ();
5987 if (hi != 0)
5989 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5991 /* Be careful not to destroy OUTVAL. */
5992 if (reg_overlap_mentioned_p (base_plus, outval))
5994 /* Updating base_plus might destroy outval, see if we
5995 can swap the scratch and base_plus. */
5996 if (!reg_overlap_mentioned_p (scratch, outval))
5998 rtx tmp = scratch;
5999 scratch = base_plus;
6000 base_plus = tmp;
6002 else
6004 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6006 /* Be conservative and copy outval into scratch now,
6007 this should only be necessary if outval is a
6008 subreg of something larger than a word. */
6009 /* XXX Might this clobber base? I can't see how it
6010 can, since scratch is known to overlap with
6011 outval. */
6012 emit_insn (gen_movhi (scratch_hi, outval));
6013 outval = scratch_hi;
6017 /* Get the base address; addsi3 knows how to handle constants
6018 that require more than one insn. */
6019 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6020 base = base_plus;
6021 offset = lo;
6025 if (BYTES_BIG_ENDIAN)
6027 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6028 plus_constant (base, offset + 1)),
6029 gen_lowpart (QImode, outval)));
6030 emit_insn (gen_lshrsi3 (scratch,
6031 gen_rtx_SUBREG (SImode, outval, 0),
6032 GEN_INT (8)));
6033 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6034 gen_lowpart (QImode, scratch)));
6036 else
6038 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6039 gen_lowpart (QImode, outval)));
6040 emit_insn (gen_lshrsi3 (scratch,
6041 gen_rtx_SUBREG (SImode, outval, 0),
6042 GEN_INT (8)));
6043 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6044 plus_constant (base, offset + 1)),
6045 gen_lowpart (QImode, scratch)));
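/* Illustration: on a little-endian target the sequence above stores
   the low byte of OUTVAL at mem[offset], shifts OUTVAL right by 8
   into SCRATCH, and stores that byte at mem[offset + 1]; a
   big-endian target uses the same steps with the two addresses
   swapped.  */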
6049 /* Print a symbolic form of X to the debug file, F. */
6050 static void
6051 arm_print_value (FILE *f, rtx x)
6053 switch (GET_CODE (x))
6055 case CONST_INT:
6056 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6057 return;
6059 case CONST_DOUBLE:
6060 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6061 return;
6063 case CONST_VECTOR:
6065 int i;
6067 fprintf (f, "<");
6068 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6070 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6071 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6072 fputc (',', f);
6074 fprintf (f, ">");
6076 return;
6078 case CONST_STRING:
6079 fprintf (f, "\"%s\"", XSTR (x, 0));
6080 return;
6082 case SYMBOL_REF:
6083 fprintf (f, "`%s'", XSTR (x, 0));
6084 return;
6086 case LABEL_REF:
6087 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6088 return;
6090 case CONST:
6091 arm_print_value (f, XEXP (x, 0));
6092 return;
6094 case PLUS:
6095 arm_print_value (f, XEXP (x, 0));
6096 fprintf (f, "+");
6097 arm_print_value (f, XEXP (x, 1));
6098 return;
6100 case PC:
6101 fprintf (f, "pc");
6102 return;
6104 default:
6105 fprintf (f, "????");
6106 return;
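/* Sample output (illustrative): a CONST_INT of 10 prints as "0xa", a
   SYMBOL_REF for "foo" prints as "`foo'", and the expression
   (const (plus (symbol_ref "foo") (const_int 4))) prints as
   "`foo'+0x4".  */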
6110 /* Routines for manipulation of the constant pool. */
6112 /* Arm instructions cannot load a large constant directly into a
6113 register; they have to come from a pc relative load. The constant
6114 must therefore be placed in the addressable range of the pc
6115 relative load. Depending on the precise pc relative load
6116 instruction the range is somewhere between 256 bytes and 4k. This
6117 means that we often have to dump a constant inside a function, and
6118 generate code to branch around it.
6120 It is important to minimize this, since the branches will slow
6121 things down and make the code larger.
6123 Normally we can hide the table after an existing unconditional
6124 branch so that there is no interruption of the flow, but in the
6125 worst case the code looks like this:
6127 ldr rn, L1
6129 b L2
6130 align
6131 L1: .long value
6135 ldr rn, L3
6137 b L4
6138 align
6139 L3: .long value
6143 We fix this by performing a scan after scheduling, which notices
6144 which instructions need to have their operands fetched from the
6145 constant table and builds the table.
6147 The algorithm starts by building a table of all the constants that
6148 need fixing up and all the natural barriers in the function (places
6149 where a constant table can be dropped without breaking the flow).
6150 For each fixup we note how far the pc-relative replacement will be
6151 able to reach and the offset of the instruction into the function.
6153 Having built the table we then group the fixes together to form
6154 tables that are as large as possible (subject to addressing
6155 constraints) and emit each table of constants after the last
6156 barrier that is within range of all the instructions in the group.
6157 If a group does not contain a barrier, then we forcibly create one
6158 by inserting a jump instruction into the flow. Once the table has
6159 been inserted, the insns are then modified to reference the
6160 relevant entry in the pool.
6162 Possible enhancements to the algorithm (not implemented) are:
6164 1) For some processors and object formats, there may be benefit in
6165 aligning the pools to the start of cache lines; this alignment
6166 would need to be taken into account when calculating addressability
6167 of a pool. */
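/* A worked example of the addressability constraint (illustrative):
   a word load at function offset 0x100 with a 4KB pc-relative range
   records a fix at address 0x100 that can reach forwards to roughly
   offset 0x1100; any pool serving it must therefore be emitted after
   a barrier that lies below that limit.  */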
6169 /* These typedefs are located at the start of this file, so that
6170 they can be used in the prototypes there. This comment is to
6171 remind readers of that fact so that the following structures
6172 can be understood more easily.
6174 typedef struct minipool_node Mnode;
6175 typedef struct minipool_fixup Mfix; */
6177 struct minipool_node
6179 /* Doubly linked chain of entries. */
6180 Mnode * next;
6181 Mnode * prev;
6182 /* The maximum offset into the code at which this entry can be placed.  While
6183 pushing fixes for forward references, all entries are sorted in order
6184 of increasing max_address. */
6185 HOST_WIDE_INT max_address;
6186 /* Similarly for an entry inserted for a backwards ref. */
6187 HOST_WIDE_INT min_address;
6188 /* The number of fixes referencing this entry. This can become zero
6189 if we "unpush" an entry. In this case we ignore the entry when we
6190 come to emit the code. */
6191 int refcount;
6192 /* The offset from the start of the minipool. */
6193 HOST_WIDE_INT offset;
6194 /* The value in the table. */
6195 rtx value;
6196 /* The mode of value. */
6197 enum machine_mode mode;
6198 /* The size of the value. With iWMMXt enabled
6199 sizes > 4 also imply an alignment of 8-bytes. */
6200 int fix_size;
6203 struct minipool_fixup
6205 Mfix * next;
6206 rtx insn;
6207 HOST_WIDE_INT address;
6208 rtx * loc;
6209 enum machine_mode mode;
6210 int fix_size;
6211 rtx value;
6212 Mnode * minipool;
6213 HOST_WIDE_INT forwards;
6214 HOST_WIDE_INT backwards;
6217 /* Fixes less than a word need padding out to a word boundary. */
6218 #define MINIPOOL_FIX_SIZE(mode) \
6219 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
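/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   are both 4, while MINIPOOL_FIX_SIZE (DImode) is 8.  */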
6221 static Mnode * minipool_vector_head;
6222 static Mnode * minipool_vector_tail;
6223 static rtx minipool_vector_label;
6225 /* The linked list of all minipool fixes required for this function. */
6226 Mfix * minipool_fix_head;
6227 Mfix * minipool_fix_tail;
6228 /* The fix entry for the current minipool, once it has been placed. */
6229 Mfix * minipool_barrier;
6231 /* Determines if INSN is the start of a jump table. Returns the end
6232 of the TABLE or NULL_RTX. */
6233 static rtx
6234 is_jump_table (rtx insn)
6236 rtx table;
6238 if (GET_CODE (insn) == JUMP_INSN
6239 && JUMP_LABEL (insn) != NULL
6240 && ((table = next_real_insn (JUMP_LABEL (insn)))
6241 == next_real_insn (insn))
6242 && table != NULL
6243 && GET_CODE (table) == JUMP_INSN
6244 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6245 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6246 return table;
6248 return NULL_RTX;
6251 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6252 #define JUMP_TABLES_IN_TEXT_SECTION 0
6253 #endif
6255 static HOST_WIDE_INT
6256 get_jump_table_size (rtx insn)
6258 /* ADDR_VECs only take room if read-only data goes into the text
6259 section. */
6260 if (JUMP_TABLES_IN_TEXT_SECTION
6261 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6262 || 1
6263 #endif
6266 rtx body = PATTERN (insn);
6267 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6269 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6272 return 0;
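/* Example (illustrative): an ADDR_DIFF_VEC in HImode with ten entries
   occupies GET_MODE_SIZE (HImode) * 10 == 20 bytes when jump tables
   are placed in the text section, and no room otherwise.  */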
6275 /* Move a minipool fix MP from its current location to before MAX_MP.
6276 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6277 constraints may need updating. */
6278 static Mnode *
6279 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6280 HOST_WIDE_INT max_address)
6282 /* This should never be true and the code below assumes these are
6283 different. */
6284 if (mp == max_mp)
6285 abort ();
6287 if (max_mp == NULL)
6289 if (max_address < mp->max_address)
6290 mp->max_address = max_address;
6292 else
6294 if (max_address > max_mp->max_address - mp->fix_size)
6295 mp->max_address = max_mp->max_address - mp->fix_size;
6296 else
6297 mp->max_address = max_address;
6299 /* Unlink MP from its current position. Since max_mp is non-null,
6300 mp->prev must be non-null. */
6301 mp->prev->next = mp->next;
6302 if (mp->next != NULL)
6303 mp->next->prev = mp->prev;
6304 else
6305 minipool_vector_tail = mp->prev;
6307 /* Re-insert it before MAX_MP. */
6308 mp->next = max_mp;
6309 mp->prev = max_mp->prev;
6310 max_mp->prev = mp;
6312 if (mp->prev != NULL)
6313 mp->prev->next = mp;
6314 else
6315 minipool_vector_head = mp;
6318 /* Save the new entry. */
6319 max_mp = mp;
6321 /* Scan over the preceding entries and adjust their addresses as
6322 required. */
6323 while (mp->prev != NULL
6324 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6326 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6327 mp = mp->prev;
6330 return max_mp;
6333 /* Add a constant to the minipool for a forward reference. Returns the
6334 node added or NULL if the constant will not fit in this pool. */
6335 static Mnode *
6336 add_minipool_forward_ref (Mfix *fix)
6338 /* If set, max_mp is the first pool_entry that has a lower
6339 constraint than the one we are trying to add. */
6340 Mnode * max_mp = NULL;
6341 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6342 Mnode * mp;
6344 /* If this fix's address is greater than the address of the first
6345 entry, then we can't put the fix in this pool. We subtract the
6346 size of the current fix to ensure that if the table is fully
6347 packed we still have enough room to insert this value by shuffling
6348 the other fixes forwards. */
6349 if (minipool_vector_head &&
6350 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6351 return NULL;
6353 /* Scan the pool to see if a constant with the same value has
6354 already been added. While we are doing this, also note the
6355 location where we must insert the constant if it doesn't already
6356 exist. */
6357 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6359 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6360 && fix->mode == mp->mode
6361 && (GET_CODE (fix->value) != CODE_LABEL
6362 || (CODE_LABEL_NUMBER (fix->value)
6363 == CODE_LABEL_NUMBER (mp->value)))
6364 && rtx_equal_p (fix->value, mp->value))
6366 /* More than one fix references this entry. */
6367 mp->refcount++;
6368 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6371 /* Note the insertion point if necessary. */
6372 if (max_mp == NULL
6373 && mp->max_address > max_address)
6374 max_mp = mp;
6376 /* If we are inserting an 8-byte aligned quantity and
6377 we have not already found an insertion point, then
6378 make sure that all such 8-byte aligned quantities are
6379 placed at the start of the pool. */
6380 if (TARGET_REALLY_IWMMXT
6381 && max_mp == NULL
6382 && fix->fix_size == 8
6383 && mp->fix_size != 8)
6385 max_mp = mp;
6386 max_address = mp->max_address;
6390 /* The value is not currently in the minipool, so we need to create
6391 a new entry for it. If MAX_MP is NULL, the entry will be put on
6392 the end of the list since the placement is less constrained than
6393 any existing entry. Otherwise, we insert the new fix before
6394 MAX_MP and, if necessary, adjust the constraints on the other
6395 entries. */
6396 mp = xmalloc (sizeof (* mp));
6397 mp->fix_size = fix->fix_size;
6398 mp->mode = fix->mode;
6399 mp->value = fix->value;
6400 mp->refcount = 1;
6401 /* Not yet required for a backwards ref. */
6402 mp->min_address = -65536;
6404 if (max_mp == NULL)
6406 mp->max_address = max_address;
6407 mp->next = NULL;
6408 mp->prev = minipool_vector_tail;
6410 if (mp->prev == NULL)
6412 minipool_vector_head = mp;
6413 minipool_vector_label = gen_label_rtx ();
6415 else
6416 mp->prev->next = mp;
6418 minipool_vector_tail = mp;
6420 else
6422 if (max_address > max_mp->max_address - mp->fix_size)
6423 mp->max_address = max_mp->max_address - mp->fix_size;
6424 else
6425 mp->max_address = max_address;
6427 mp->next = max_mp;
6428 mp->prev = max_mp->prev;
6429 max_mp->prev = mp;
6430 if (mp->prev != NULL)
6431 mp->prev->next = mp;
6432 else
6433 minipool_vector_head = mp;
6436 /* Save the new entry. */
6437 max_mp = mp;
6439 /* Scan over the preceding entries and adjust their addresses as
6440 required. */
6441 while (mp->prev != NULL
6442 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6444 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6445 mp = mp->prev;
6448 return max_mp;
6451 static Mnode *
6452 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6453 HOST_WIDE_INT min_address)
6455 HOST_WIDE_INT offset;
6457 /* This should never be true, and the code below assumes these are
6458 different. */
6459 if (mp == min_mp)
6460 abort ();
6462 if (min_mp == NULL)
6464 if (min_address > mp->min_address)
6465 mp->min_address = min_address;
6467 else
6469 /* We will adjust this below if it is too loose. */
6470 mp->min_address = min_address;
6472 /* Unlink MP from its current position. Since min_mp is non-null,
6473 mp->next must be non-null. */
6474 mp->next->prev = mp->prev;
6475 if (mp->prev != NULL)
6476 mp->prev->next = mp->next;
6477 else
6478 minipool_vector_head = mp->next;
6480 /* Reinsert it after MIN_MP. */
6481 mp->prev = min_mp;
6482 mp->next = min_mp->next;
6483 min_mp->next = mp;
6484 if (mp->next != NULL)
6485 mp->next->prev = mp;
6486 else
6487 minipool_vector_tail = mp;
6490 min_mp = mp;
6492 offset = 0;
6493 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6495 mp->offset = offset;
6496 if (mp->refcount > 0)
6497 offset += mp->fix_size;
6499 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6500 mp->next->min_address = mp->min_address + mp->fix_size;
6503 return min_mp;
6506 /* Add a constant to the minipool for a backward reference. Returns the
6507 node added or NULL if the constant will not fit in this pool.
6509 Note that the code for inserting a backwards reference can be
6510 somewhat confusing because the calculated offsets for each fix do
6511 not take into account the size of the pool (which is still under
6512 construction). */
6513 static Mnode *
6514 add_minipool_backward_ref (Mfix *fix)
6516 /* If set, min_mp is the last pool_entry that has a lower constraint
6517 than the one we are trying to add. */
6518 Mnode *min_mp = NULL;
6519 /* This can be negative, since it is only a constraint. */
6520 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6521 Mnode *mp;
6523 /* If we can't reach the current pool from this insn, or if we can't
6524 insert this entry at the end of the pool without pushing other
6525 fixes out of range, then we don't try. This ensures that we
6526 can't fail later on. */
6527 if (min_address >= minipool_barrier->address
6528 || (minipool_vector_tail->min_address + fix->fix_size
6529 >= minipool_barrier->address))
6530 return NULL;
6532 /* Scan the pool to see if a constant with the same value has
6533 already been added. While we are doing this, also note the
6534 location where we must insert the constant if it doesn't already
6535 exist. */
6536 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6538 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6539 && fix->mode == mp->mode
6540 && (GET_CODE (fix->value) != CODE_LABEL
6541 || (CODE_LABEL_NUMBER (fix->value)
6542 == CODE_LABEL_NUMBER (mp->value)))
6543 && rtx_equal_p (fix->value, mp->value)
6544 /* Check that there is enough slack to move this entry to the
6545 end of the table (this is conservative). */
6546 && (mp->max_address
6547 > (minipool_barrier->address
6548 + minipool_vector_tail->offset
6549 + minipool_vector_tail->fix_size)))
6551 mp->refcount++;
6552 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6555 if (min_mp != NULL)
6556 mp->min_address += fix->fix_size;
6557 else
6559 /* Note the insertion point if necessary. */
6560 if (mp->min_address < min_address)
6562 /* For now, we do not allow the insertion of nodes that require
6563 8-byte alignment anywhere but at the start of the pool. */
6564 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
6565 return NULL;
6566 else
6567 min_mp = mp;
6569 else if (mp->max_address
6570 < minipool_barrier->address + mp->offset + fix->fix_size)
6572 /* Inserting before this entry would push the fix beyond
6573 its maximum address (which can happen if we have
6574 re-located a forwards fix); force the new fix to come
6575 after it. */
6576 min_mp = mp;
6577 min_address = mp->min_address + fix->fix_size;
6579 /* If we are inserting an 8-byte aligned quantity and
6580 we have not already found an insertion point, then
6581 make sure that all such 8-byte aligned quantities are
6582 placed at the start of the pool. */
6583 else if (TARGET_REALLY_IWMMXT
6584 && min_mp == NULL
6585 && fix->fix_size == 8
6586 && mp->fix_size < 8)
6588 min_mp = mp;
6589 min_address = mp->min_address + fix->fix_size;
6594 /* We need to create a new entry. */
6595 mp = xmalloc (sizeof (* mp));
6596 mp->fix_size = fix->fix_size;
6597 mp->mode = fix->mode;
6598 mp->value = fix->value;
6599 mp->refcount = 1;
6600 mp->max_address = minipool_barrier->address + 65536;
6602 mp->min_address = min_address;
6604 if (min_mp == NULL)
6606 mp->prev = NULL;
6607 mp->next = minipool_vector_head;
6609 if (mp->next == NULL)
6611 minipool_vector_tail = mp;
6612 minipool_vector_label = gen_label_rtx ();
6614 else
6615 mp->next->prev = mp;
6617 minipool_vector_head = mp;
6619 else
6621 mp->next = min_mp->next;
6622 mp->prev = min_mp;
6623 min_mp->next = mp;
6625 if (mp->next != NULL)
6626 mp->next->prev = mp;
6627 else
6628 minipool_vector_tail = mp;
6631 /* Save the new entry. */
6632 min_mp = mp;
6634 if (mp->prev)
6635 mp = mp->prev;
6636 else
6637 mp->offset = 0;
6639 /* Scan over the following entries and adjust their offsets. */
6640 while (mp->next != NULL)
6642 if (mp->next->min_address < mp->min_address + mp->fix_size)
6643 mp->next->min_address = mp->min_address + mp->fix_size;
6645 if (mp->refcount)
6646 mp->next->offset = mp->offset + mp->fix_size;
6647 else
6648 mp->next->offset = mp->offset;
6650 mp = mp->next;
6653 return min_mp;
6656 static void
6657 assign_minipool_offsets (Mfix *barrier)
6659 HOST_WIDE_INT offset = 0;
6660 Mnode *mp;
6662 minipool_barrier = barrier;
6664 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6666 mp->offset = offset;
6668 if (mp->refcount > 0)
6669 offset += mp->fix_size;
6673 /* Output the literal table. */
6674 static void
6675 dump_minipool (rtx scan)
6677 Mnode * mp;
6678 Mnode * nmp;
6679 int align64 = 0;
6681 if (TARGET_REALLY_IWMMXT)
6682 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6683 if (mp->refcount > 0 && mp->fix_size == 8)
6685 align64 = 1;
6686 break;
6689 if (rtl_dump_file)
6690 fprintf (rtl_dump_file,
6691 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
6692 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
6694 scan = emit_label_after (gen_label_rtx (), scan);
6695 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
6696 scan = emit_label_after (minipool_vector_label, scan);
6698 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
6700 if (mp->refcount > 0)
6702 if (rtl_dump_file)
6704 fprintf (rtl_dump_file,
6705 ";; Offset %u, min %ld, max %ld ",
6706 (unsigned) mp->offset, (unsigned long) mp->min_address,
6707 (unsigned long) mp->max_address);
6708 arm_print_value (rtl_dump_file, mp->value);
6709 fputc ('\n', rtl_dump_file);
6712 switch (mp->fix_size)
6714 #ifdef HAVE_consttable_1
6715 case 1:
6716 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
6717 break;
6719 #endif
6720 #ifdef HAVE_consttable_2
6721 case 2:
6722 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
6723 break;
6725 #endif
6726 #ifdef HAVE_consttable_4
6727 case 4:
6728 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
6729 break;
6731 #endif
6732 #ifdef HAVE_consttable_8
6733 case 8:
6734 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
6735 break;
6737 #endif
6738 default:
6739 abort ();
6740 break;
6744 nmp = mp->next;
6745 free (mp);
6748 minipool_vector_head = minipool_vector_tail = NULL;
6749 scan = emit_insn_after (gen_consttable_end (), scan);
6750 scan = emit_barrier_after (scan);
6753 /* Return the cost of forcibly inserting a barrier after INSN. */
6754 static int
6755 arm_barrier_cost (rtx insn)
6757 /* Basing the location of the pool on the loop depth is preferable,
6758 but at the moment, the basic block information seems to be
6759 corrupt by this stage of the compilation. */
6760 int base_cost = 50;
6761 rtx next = next_nonnote_insn (insn);
6763 if (next != NULL && GET_CODE (next) == CODE_LABEL)
6764 base_cost -= 20;
6766 switch (GET_CODE (insn))
6768 case CODE_LABEL:
6769 /* It will always be better to place the table before the label, rather
6770 than after it. */
6771 return 50;
6773 case INSN:
6774 case CALL_INSN:
6775 return base_cost;
6777 case JUMP_INSN:
6778 return base_cost - 10;
6780 default:
6781 return base_cost + 10;
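/* Example costs (illustrative): an INSN or CALL_INSN normally costs
   50, or 30 when the following insn is a CODE_LABEL (a natural
   break); a JUMP_INSN costs 40 (20 before a label); a CODE_LABEL
   itself always costs 50, since the pool is better placed before
   it.  */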
6785 /* Find the best place in the insn stream in the range
6786 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
6787 Create the barrier by inserting a jump and add a new fix entry for
6788 it. */
6789 static Mfix *
6790 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
6792 HOST_WIDE_INT count = 0;
6793 rtx barrier;
6794 rtx from = fix->insn;
6795 rtx selected = from;
6796 int selected_cost;
6797 HOST_WIDE_INT selected_address;
6798 Mfix * new_fix;
6799 HOST_WIDE_INT max_count = max_address - fix->address;
6800 rtx label = gen_label_rtx ();
6802 selected_cost = arm_barrier_cost (from);
6803 selected_address = fix->address;
6805 while (from && count < max_count)
6807 rtx tmp;
6808 int new_cost;
6810 /* This code shouldn't have been called if there was a natural barrier
6811 within range. */
6812 if (GET_CODE (from) == BARRIER)
6813 abort ();
6815 /* Count the length of this insn. */
6816 count += get_attr_length (from);
6818 /* If there is a jump table, add its length. */
6819 tmp = is_jump_table (from);
6820 if (tmp != NULL)
6822 count += get_jump_table_size (tmp);
6824 /* Jump tables aren't in a basic block, so base the cost on
6825 the dispatch insn. If we select this location, we will
6826 still put the pool after the table. */
6827 new_cost = arm_barrier_cost (from);
6829 if (count < max_count && new_cost <= selected_cost)
6831 selected = tmp;
6832 selected_cost = new_cost;
6833 selected_address = fix->address + count;
6836 /* Continue after the dispatch table. */
6837 from = NEXT_INSN (tmp);
6838 continue;
6841 new_cost = arm_barrier_cost (from);
6843 if (count < max_count && new_cost <= selected_cost)
6845 selected = from;
6846 selected_cost = new_cost;
6847 selected_address = fix->address + count;
6850 from = NEXT_INSN (from);
6853 /* Create a new JUMP_INSN that branches around a barrier. */
6854 from = emit_jump_insn_after (gen_jump (label), selected);
6855 JUMP_LABEL (from) = label;
6856 barrier = emit_barrier_after (from);
6857 emit_label_after (label, barrier);
6859 /* Create a minipool barrier entry for the new barrier. */
6860 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
6861 new_fix->insn = barrier;
6862 new_fix->address = selected_address;
6863 new_fix->next = fix->next;
6864 fix->next = new_fix;
6866 return new_fix;
6869 /* Record that there is a natural barrier in the insn stream at
6870 ADDRESS. */
6871 static void
6872 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
6874 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6876 fix->insn = insn;
6877 fix->address = address;
6879 fix->next = NULL;
6880 if (minipool_fix_head != NULL)
6881 minipool_fix_tail->next = fix;
6882 else
6883 minipool_fix_head = fix;
6885 minipool_fix_tail = fix;
6888 /* Record INSN, which will need fixing up to load a value from the
6889 minipool. ADDRESS is the offset of the insn since the start of the
6890 function; LOC is a pointer to the part of the insn which requires
6891 fixing; VALUE is the constant that must be loaded, which is of type
6892 MODE. */
6893 static void
6894 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
6895 enum machine_mode mode, rtx value)
6897 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6899 #ifdef AOF_ASSEMBLER
6900 /* PIC symbol references need to be converted into offsets into the
6901 based area. */
6902 /* XXX This shouldn't be done here. */
6903 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
6904 value = aof_pic_entry (value);
6905 #endif /* AOF_ASSEMBLER */
6907 fix->insn = insn;
6908 fix->address = address;
6909 fix->loc = loc;
6910 fix->mode = mode;
6911 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
6912 fix->value = value;
6913 fix->forwards = get_attr_pool_range (insn);
6914 fix->backwards = get_attr_neg_pool_range (insn);
6915 fix->minipool = NULL;
6917 /* If an insn doesn't have a range defined for it, then it isn't
6918 expecting to be reworked by this code. Better to abort now than
6919 to generate duff assembly code. */
6920 if (fix->forwards == 0 && fix->backwards == 0)
6921 abort ();
6923 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
6924 So there might be an empty word before the start of the pool.
6925 Hence we reduce the forward range by 4 to allow for this
6926 possibility. */
6927 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
6928 fix->forwards -= 4;
6930 if (rtl_dump_file)
6932 fprintf (rtl_dump_file,
6933 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6934 GET_MODE_NAME (mode),
6935 INSN_UID (insn), (unsigned long) address,
6936 -1 * (long)fix->backwards, (long)fix->forwards);
6937 arm_print_value (rtl_dump_file, fix->value);
6938 fprintf (rtl_dump_file, "\n");
6941 /* Add it to the chain of fixes. */
6942 fix->next = NULL;
6944 if (minipool_fix_head != NULL)
6945 minipool_fix_tail->next = fix;
6946 else
6947 minipool_fix_head = fix;
6949 minipool_fix_tail = fix;
6952 /* Scan INSN and note any of its operands that need fixing.
6953 If DO_PUSHES is false we do not actually push any of the fixups
6954 needed.  The function returns TRUE if any fixups were needed/pushed.
6955 This is used by arm_memory_load_p() which needs to know about loads
6956 of constants that will be converted into minipool loads. */
6957 static bool
6958 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
6960 bool result = false;
6961 int opno;
6963 extract_insn (insn);
6965 if (!constrain_operands (1))
6966 fatal_insn_not_found (insn);
6968 if (recog_data.n_alternatives == 0)
6969 return false;
6971 /* Fill in recog_op_alt with information about the constraints of this insn. */
6972 preprocess_constraints ();
6974 for (opno = 0; opno < recog_data.n_operands; opno++)
6976 /* Things we need to fix can only occur in inputs. */
6977 if (recog_data.operand_type[opno] != OP_IN)
6978 continue;
6980 /* If this alternative is a memory reference, then any mention
6981 of constants in this alternative is really to fool reload
6982 into allowing us to accept one there. We need to fix them up
6983 now so that we output the right code. */
6984 if (recog_op_alt[opno][which_alternative].memory_ok)
6986 rtx op = recog_data.operand[opno];
6988 if (CONSTANT_P (op))
6990 if (do_pushes)
6991 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6992 recog_data.operand_mode[opno], op);
6993 result = true;
6995 else if (GET_CODE (op) == MEM
6996 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6997 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6999 if (do_pushes)
7001 rtx cop = avoid_constant_pool_reference (op);
7003 /* Casting the address of something to a mode narrower
7004 than a word can cause avoid_constant_pool_reference()
7005 to return the pool reference itself. That's no good to
7006 us here.  Let's just hope that we can use the
7007 constant pool value directly. */
7008 if (op == cop)
7009 cop = get_pool_constant (XEXP (op, 0));
7011 push_minipool_fix (insn, address,
7012 recog_data.operand_loc[opno],
7013 recog_data.operand_mode[opno], cop);
7016 result = true;
7021 return result;
7024 /* GCC puts the pool in the wrong place for ARM, since we can only
7025 load addresses a limited distance around the pc. We do some
7026 special munging to move the constant pool values to the correct
7027 point in the code. */
7028 static void
7029 arm_reorg (void)
7031 rtx insn;
7032 HOST_WIDE_INT address = 0;
7033 Mfix * fix;
7035 minipool_fix_head = minipool_fix_tail = NULL;
7037 /* The first insn must always be a note, or the code below won't
7038 scan it properly. */
7039 insn = get_insns ();
7040 if (GET_CODE (insn) != NOTE)
7041 abort ();
7043 /* Scan all the insns and record the operands that will need fixing. */
7044 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7046 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7047 && (arm_cirrus_insn_p (insn)
7048 || GET_CODE (insn) == JUMP_INSN
7049 || arm_memory_load_p (insn)))
7050 cirrus_reorg (insn);
7052 if (GET_CODE (insn) == BARRIER)
7053 push_minipool_barrier (insn, address);
7054 else if (INSN_P (insn))
7056 rtx table;
7058 note_invalid_constants (insn, address, true);
7059 address += get_attr_length (insn);
7061 /* If the insn is a vector jump, add the size of the table
7062 and skip the table. */
7063 if ((table = is_jump_table (insn)) != NULL)
7065 address += get_jump_table_size (table);
7066 insn = table;
7071 fix = minipool_fix_head;
7073 /* Now scan the fixups and perform the required changes. */
7074 while (fix)
7076 Mfix * ftmp;
7077 Mfix * fdel;
7078 Mfix * last_added_fix;
7079 Mfix * last_barrier = NULL;
7080 Mfix * this_fix;
7082 /* Skip any further barriers before the next fix. */
7083 while (fix && GET_CODE (fix->insn) == BARRIER)
7084 fix = fix->next;
7086 /* No more fixes. */
7087 if (fix == NULL)
7088 break;
7090 last_added_fix = NULL;
7092 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7094 if (GET_CODE (ftmp->insn) == BARRIER)
7096 if (ftmp->address >= minipool_vector_head->max_address)
7097 break;
7099 last_barrier = ftmp;
7101 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7102 break;
7104 last_added_fix = ftmp; /* Keep track of the last fix added. */
7107 /* If we found a barrier, drop back to that; any fixes that we
7108 could have reached but come after the barrier will now go in
7109 the next mini-pool. */
7110 if (last_barrier != NULL)
7112 /* Reduce the refcount for those fixes that won't go into this
7113 pool after all. */
7114 for (fdel = last_barrier->next;
7115 fdel && fdel != ftmp;
7116 fdel = fdel->next)
7118 fdel->minipool->refcount--;
7119 fdel->minipool = NULL;
7122 ftmp = last_barrier;
7124 else
7126 /* ftmp is the first fix that we can't fit into this pool and
7127 there are no natural barriers that we could use.  Insert a
7128 new barrier in the code somewhere between the previous
7129 fix and this one, and arrange to jump around it. */
7130 HOST_WIDE_INT max_address;
7132 /* The last item on the list of fixes must be a barrier, so
7133 we can never run off the end of the list of fixes without
7134 last_barrier being set. */
7135 if (ftmp == NULL)
7136 abort ();
7138 max_address = minipool_vector_head->max_address;
7139 /* Check that there isn't another fix that is in range that
7140 we couldn't fit into this pool because the pool was
7141 already too large: we need to put the pool before such an
7142 instruction. */
7143 if (ftmp->address < max_address)
7144 max_address = ftmp->address;
7146 last_barrier = create_fix_barrier (last_added_fix, max_address);
7149 assign_minipool_offsets (last_barrier);
7151 while (ftmp)
7153 if (GET_CODE (ftmp->insn) != BARRIER
7154 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7155 == NULL))
7156 break;
7158 ftmp = ftmp->next;
7161 /* Scan over the fixes we have identified for this pool, fixing them
7162 up and adding the constants to the pool itself. */
7163 for (this_fix = fix; this_fix && ftmp != this_fix;
7164 this_fix = this_fix->next)
7165 if (GET_CODE (this_fix->insn) != BARRIER)
7167 rtx addr
7168 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7169 minipool_vector_label),
7170 this_fix->minipool->offset);
7171 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7174 dump_minipool (last_barrier->insn);
7175 fix = ftmp;
7178 /* From now on we must synthesize any constants that we can't handle
7179 directly. This can happen if the RTL gets split during final
7180 instruction generation. */
7181 after_arm_reorg = 1;
7183 /* Free the minipool memory. */
7184 obstack_free (&minipool_obstack, minipool_startobj);
7187 /* Routines to output assembly language. */
7189 /* If the rtx is the correct value then return the string of the number.
7190 In this way we can ensure that valid double constants are generated even
7191 when cross-compiling. */
7192 const char *
7193 fp_immediate_constant (rtx x)
7195 REAL_VALUE_TYPE r;
7196 int i;
7198 if (!fpa_consts_inited)
7199 init_fpa_table ();
7201 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7202 for (i = 0; i < 8; i++)
7203 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
7204 return strings_fpa[i];
7206 abort ();
7209 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7210 static const char *
7211 fp_const_from_val (REAL_VALUE_TYPE *r)
7213 int i;
7215 if (!fpa_consts_inited)
7216 init_fpa_table ();
7218 for (i = 0; i < 8; i++)
7219 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
7220 return strings_fpa[i];
7222 abort ();
7225 /* Output the operands of an LDM/STM instruction to STREAM.
7226 MASK is the ARM register set mask of which only bits 0-15 are important.
7227 REG is the base register, either the frame pointer or the stack pointer,
7228 INSTR is the possibly suffixed load or store instruction. */
7229 static void
7230 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7232 int i;
7233 int not_first = FALSE;
7235 fputc ('\t', stream);
7236 asm_fprintf (stream, instr, reg);
7237 fputs (", {", stream);
7239 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7240 if (mask & (1 << i))
7242 if (not_first)
7243 fprintf (stream, ", ");
7245 asm_fprintf (stream, "%r", i);
7246 not_first = TRUE;
7249 fprintf (stream, "}");
7251 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7252 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7253 Strictly speaking the instruction would be unpredictable only if
7254 we were writing back the base register as well, but since we never
7255 want to generate an LDM type 2 instruction (register bank switching)
7256 which is what you get if the PC is not being loaded, we do not need
7257 to check for writeback. */
7258 if (! TARGET_APCS_32
7259 && ((mask & (1 << PC_REGNUM)) != 0))
7260 fprintf (stream, "^");
7262 fprintf (stream, "\n");
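/* Example (illustrative): assuming INSTR is "ldmfd\t%r!", a call such
   as print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 0x4070) would emit
   a line like "ldmfd sp!, {r4, r5, r6, lr}", since bits 4, 5, 6 and
   14 are set in the mask.  */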
7265 /* Output a 'call' insn. */
7266 const char *
7267 output_call (rtx *operands)
7269 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7271 if (REGNO (operands[0]) == LR_REGNUM)
7273 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7274 output_asm_insn ("mov%?\t%0, %|lr", operands);
7277 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7279 if (TARGET_INTERWORK)
7280 output_asm_insn ("bx%?\t%0", operands);
7281 else
7282 output_asm_insn ("mov%?\t%|pc, %0", operands);
7284 return "";
7287 /* Output a 'call' insn that is a reference in memory. */
7288 const char *
7289 output_call_mem (rtx *operands)
7291 if (TARGET_INTERWORK)
7293 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7294 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7295 output_asm_insn ("bx%?\t%|ip", operands);
7297 else if (regno_use_in (LR_REGNUM, operands[0]))
7299 /* LR is used in the memory address. We load the address in the
7300 first instruction. It's safe to use IP as the target of the
7301 load since the call will kill it anyway. */
7302 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7303 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7304 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7306 else
7308 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7309 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7312 return "";
7316 /* Output a move from arm registers to an fpa register.
7317 OPERANDS[0] is an fpa register.
7318 OPERANDS[1] is the first register of an arm register pair. */
7319 const char *
7320 output_mov_long_double_fpa_from_arm (rtx *operands)
7322 int arm_reg0 = REGNO (operands[1]);
7323 rtx ops[3];
7325 if (arm_reg0 == IP_REGNUM)
7326 abort ();
7328 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7329 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7330 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7332 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7333 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7335 return "";
7338 /* Output a move from an fpa register to arm registers.
7339 OPERANDS[0] is the first register of an arm register pair.
7340 OPERANDS[1] is an fpa register. */
7341 const char *
7342 output_mov_long_double_arm_from_fpa (rtx *operands)
7344 int arm_reg0 = REGNO (operands[0]);
7345 rtx ops[3];
7347 if (arm_reg0 == IP_REGNUM)
7348 abort ();
7350 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7351 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7352 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7354 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7355 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7356 return "";
7359 /* Output a move from arm registers to arm registers of a long double.
7360 OPERANDS[0] is the destination.
7361 OPERANDS[1] is the source. */
7362 const char *
7363 output_mov_long_double_arm_from_arm (rtx *operands)
7365 /* We have to be careful here because the two might overlap. */
7366 int dest_start = REGNO (operands[0]);
7367 int src_start = REGNO (operands[1]);
7368 rtx ops[2];
7369 int i;
7371 if (dest_start < src_start)
7373 for (i = 0; i < 3; i++)
7375 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7376 ops[1] = gen_rtx_REG (SImode, src_start + i);
7377 output_asm_insn ("mov%?\t%0, %1", ops);
7380 else
7382 for (i = 2; i >= 0; i--)
7384 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7385 ops[1] = gen_rtx_REG (SImode, src_start + i);
7386 output_asm_insn ("mov%?\t%0, %1", ops);
7390 return "";
7394 /* Output a move from arm registers to an fpa register.
7395 OPERANDS[0] is an fpa register.
7396 OPERANDS[1] is the first register of an arm register pair. */
7397 const char *
7398 output_mov_double_fpa_from_arm (rtx *operands)
7400 int arm_reg0 = REGNO (operands[1]);
7401 rtx ops[2];
7403 if (arm_reg0 == IP_REGNUM)
7404 abort ();
7406 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7407 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7408 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7409 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7410 return "";
7413 /* Output a move from an fpa register to arm registers.
7414 OPERANDS[0] is the first register of an arm register pair.
7415 OPERANDS[1] is an fpa register. */
7416 const char *
7417 output_mov_double_arm_from_fpa (rtx *operands)
7419 int arm_reg0 = REGNO (operands[0]);
7420 rtx ops[2];
7422 if (arm_reg0 == IP_REGNUM)
7423 abort ();
7425 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7426 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7427 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
7428 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
7429 return "";
7432 /* Output a move between double words.
7433 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
7434 or MEM<-REG and all MEMs must be offsettable addresses. */
7435 const char *
7436 output_move_double (rtx *operands)
7438 enum rtx_code code0 = GET_CODE (operands[0]);
7439 enum rtx_code code1 = GET_CODE (operands[1]);
7440 rtx otherops[3];
7442 if (code0 == REG)
7444 int reg0 = REGNO (operands[0]);
7446 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
7448 if (code1 == REG)
7450 int reg1 = REGNO (operands[1]);
7451 if (reg1 == IP_REGNUM)
7452 abort ();
7454 /* Ensure the second source is not overwritten. */
7455 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
7456 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
7457 else
7458 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
7460 else if (code1 == CONST_VECTOR)
7462 HOST_WIDE_INT hint = 0;
7464 switch (GET_MODE (operands[1]))
7466 case V2SImode:
7467 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
7468 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
7469 break;
7471 case V4HImode:
7472 if (BYTES_BIG_ENDIAN)
7474 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7475 hint <<= 16;
7476 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7478 else
7480 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7481 hint <<= 16;
7482 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7485 otherops[1] = GEN_INT (hint);
7486 hint = 0;
7488 if (BYTES_BIG_ENDIAN)
7490 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7491 hint <<= 16;
7492 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7494 else
7496 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7497 hint <<= 16;
7498 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7501 operands[1] = GEN_INT (hint);
7502 break;
7504 case V8QImode:
7505 if (BYTES_BIG_ENDIAN)
7507 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7508 hint <<= 8;
7509 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7510 hint <<= 8;
7511 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7512 hint <<= 8;
7513 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7515 else
7517 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7518 hint <<= 8;
7519 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7520 hint <<= 8;
7521 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7522 hint <<= 8;
7523 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7526 otherops[1] = GEN_INT (hint);
7527 hint = 0;
7529 if (BYTES_BIG_ENDIAN)
7531 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7532 hint <<= 8;
7533 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7534 hint <<= 8;
7535 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7536 hint <<= 8;
7537 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7539 else
7541 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7542 hint <<= 8;
7543 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7544 hint <<= 8;
7545 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7546 hint <<= 8;
7547 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7550 operands[1] = GEN_INT (hint);
7551 break;
7553 default:
7554 abort ();
7556 output_mov_immediate (operands);
7557 output_mov_immediate (otherops);
7559 else if (code1 == CONST_DOUBLE)
7561 if (GET_MODE (operands[1]) == DFmode)
7563 REAL_VALUE_TYPE r;
7564 long l[2];
7566 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
7567 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
7568 otherops[1] = GEN_INT (l[1]);
7569 operands[1] = GEN_INT (l[0]);
7571 else if (GET_MODE (operands[1]) != VOIDmode)
7572 abort ();
7573 else if (WORDS_BIG_ENDIAN)
7575 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7576 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7578 else
7580 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7581 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7584 output_mov_immediate (operands);
7585 output_mov_immediate (otherops);
7587 else if (code1 == CONST_INT)
7589 #if HOST_BITS_PER_WIDE_INT > 32
7590 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
7591 what the upper word is. */
7592 if (WORDS_BIG_ENDIAN)
7594 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7595 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7597 else
7599 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7600 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7602 #else
7603 /* Sign extend the intval into the high-order word. */
7604 if (WORDS_BIG_ENDIAN)
7606 otherops[1] = operands[1];
7607 operands[1] = (INTVAL (operands[1]) < 0
7608 ? constm1_rtx : const0_rtx);
7610 else
7611 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
7612 #endif
7613 output_mov_immediate (otherops);
7614 output_mov_immediate (operands);
7616 else if (code1 == MEM)
7618 switch (GET_CODE (XEXP (operands[1], 0)))
7620 case REG:
7621 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
7622 break;
7624 case PRE_INC:
7625 abort (); /* Should never happen now. */
7626 break;
7628 case PRE_DEC:
7629 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
7630 break;
7632 case POST_INC:
7633 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
7634 break;
7636 case POST_DEC:
7637 abort (); /* Should never happen now. */
7638 break;
7640 case LABEL_REF:
7641 case CONST:
7642 output_asm_insn ("adr%?\t%0, %1", operands);
7643 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
7644 break;
7646 default:
7647 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
7648 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
7650 otherops[0] = operands[0];
7651 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
7652 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
7654 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
7656 if (GET_CODE (otherops[2]) == CONST_INT)
7658 switch ((int) INTVAL (otherops[2]))
7660 case -8:
7661 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
7662 return "";
7663 case -4:
7664 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
7665 return "";
7666 case 4:
7667 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
7668 return "";
7671 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
7672 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
7673 else
7674 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7676 else
7677 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7679 else
7680 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
7682 return "ldm%?ia\t%0, %M0";
7684 else
7686 otherops[1] = adjust_address (operands[1], SImode, 4);
7687 /* Take care of overlapping base/data reg. */
7688 if (reg_mentioned_p (operands[0], operands[1]))
7690 output_asm_insn ("ldr%?\t%0, %1", otherops);
7691 output_asm_insn ("ldr%?\t%0, %1", operands);
7693 else
7695 output_asm_insn ("ldr%?\t%0, %1", operands);
7696 output_asm_insn ("ldr%?\t%0, %1", otherops);
7701 else
7702 abort (); /* Constraints should prevent this. */
7704 else if (code0 == MEM && code1 == REG)
7706 if (REGNO (operands[1]) == IP_REGNUM)
7707 abort ();
7709 switch (GET_CODE (XEXP (operands[0], 0)))
7711 case REG:
7712 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
7713 break;
7715 case PRE_INC:
7716 abort (); /* Should never happen now. */
7717 break;
7719 case PRE_DEC:
7720 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
7721 break;
7723 case POST_INC:
7724 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
7725 break;
7727 case POST_DEC:
7728 abort (); /* Should never happen now. */
7729 break;
7731 case PLUS:
7732 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
7734 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
7736 case -8:
7737 output_asm_insn ("stm%?db\t%m0, %M1", operands);
7738 return "";
7740 case -4:
7741 output_asm_insn ("stm%?da\t%m0, %M1", operands);
7742 return "";
7744 case 4:
7745 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
7746 return "";
7749 /* Fall through */
7751 default:
7752 otherops[0] = adjust_address (operands[0], SImode, 4);
7753 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
7754 output_asm_insn ("str%?\t%1, %0", operands);
7755 output_asm_insn ("str%?\t%1, %0", otherops);
7758 else
7759 /* Constraints should prevent this. */
7760 abort ();
7762 return "";
7766 /* Output an arbitrary MOV reg, #n.
7767 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
7768 const char *
7769 output_mov_immediate (rtx *operands)
7771 HOST_WIDE_INT n = INTVAL (operands[1]);
7773 /* Try to use one MOV. */
7774 if (const_ok_for_arm (n))
7775 output_asm_insn ("mov%?\t%0, %1", operands);
7777 /* Try to use one MVN. */
7778 else if (const_ok_for_arm (~n))
7780 operands[1] = GEN_INT (~n);
7781 output_asm_insn ("mvn%?\t%0, %1", operands);
7783 else
7785 int n_ones = 0;
7786 int i;
7788 /* If all else fails, make it out of ORRs or BICs as appropriate. */
7789 for (i = 0; i < 32; i++)
7790 if (n & 1 << i)
7791 n_ones++;
7793 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
7794 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
7795 else
7796 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
7799 return "";
7802 /* Output an ADD r, s, #n where n may be too big for one instruction.
7803 If adding zero to one register, output nothing. */
7804 const char *
7805 output_add_immediate (rtx *operands)
7807 HOST_WIDE_INT n = INTVAL (operands[2]);
7809 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
7811 if (n < 0)
7812 output_multi_immediate (operands,
7813 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
7814 -n);
7815 else
7816 output_multi_immediate (operands,
7817 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
7821 return "";
7824 /* Output a multiple immediate operation.
7825 OPERANDS is the vector of operands referred to in the output patterns.
7826 INSTR1 is the output pattern to use for the first constant.
7827 INSTR2 is the output pattern to use for subsequent constants.
7828 IMMED_OP is the index of the constant slot in OPERANDS.
7829 N is the constant value. */
7830 static const char *
7831 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
7832 int immed_op, HOST_WIDE_INT n)
7834 #if HOST_BITS_PER_WIDE_INT > 32
7835 n &= 0xffffffff;
7836 #endif
7838 if (n == 0)
7840 /* Quick and easy output. */
7841 operands[immed_op] = const0_rtx;
7842 output_asm_insn (instr1, operands);
7844 else
7846 int i;
7847 const char * instr = instr1;
7849 /* Note that n is never zero here (which would give no output). */
7850 for (i = 0; i < 32; i += 2)
7852 if (n & (3 << i))
7854 operands[immed_op] = GEN_INT (n & (255 << i));
7855 output_asm_insn (instr, operands);
7856 instr = instr2;
7857 i += 6;
7862 return "";
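/* Worked example (illustrative, taking r0 as the destination):
   n = 0xfff is not a single rotated 8-bit immediate, so
   output_mov_immediate falls back to the MOV/ORR path and the loop
   above emits

	mov	r0, #255	@ 0x0ff
	orr	r0, r0, #3840	@ 0xf00

   scanning the constant in 8-bit chunks anchored at even bit
   positions.  */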
7865 /* Return the appropriate ARM instruction for the operation code.
7866 The returned result should not be overwritten. OP is the rtx of the
7867 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
7868 was shifted. */
7869 const char *
7870 arithmetic_instr (rtx op, int shift_first_arg)
7872 switch (GET_CODE (op))
7874 case PLUS:
7875 return "add";
7877 case MINUS:
7878 return shift_first_arg ? "rsb" : "sub";
7880 case IOR:
7881 return "orr";
7883 case XOR:
7884 return "eor";
7886 case AND:
7887 return "and";
7889 default:
7890 abort ();
7894 /* Ensure valid constant shifts and return the appropriate shift mnemonic
7895 for the operation code. The returned result should not be overwritten.
7896 OP is the rtx code of the shift.
7897 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
7898 constant shift amount otherwise. */
7899 static const char *
7900 shift_op (rtx op, HOST_WIDE_INT *amountp)
7902 const char * mnem;
7903 enum rtx_code code = GET_CODE (op);
7905 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
7906 *amountp = -1;
7907 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
7908 *amountp = INTVAL (XEXP (op, 1));
7909 else
7910 abort ();
7912 switch (code)
7914 case ASHIFT:
7915 mnem = "asl";
7916 break;
7918 case ASHIFTRT:
7919 mnem = "asr";
7920 break;
7922 case LSHIFTRT:
7923 mnem = "lsr";
7924 break;
7926 case ROTATERT:
7927 mnem = "ror";
7928 break;
7930 case MULT:
7931 /* We never have to worry about the amount being other than a
7932 power of 2, since this case can never be reloaded from a reg. */
7933 if (*amountp != -1)
7934 *amountp = int_log2 (*amountp);
7935 else
7936 abort ();
7937 return "asl";
7939 default:
7940 abort ();
7943 if (*amountp != -1)
7945 /* This is not 100% correct, but follows from the desire to merge
7946 multiplication by a power of 2 with the recognizer for a
7947 shift. >=32 is not a valid shift for "asl", so we must try and
7948 output a shift that produces the correct arithmetical result.
7949 Using lsr #32 is identical except for the fact that the carry bit
7950 is not set correctly if we set the flags; but we never use the
7951 carry bit from such an operation, so we can ignore that. */
7952 if (code == ROTATERT)
7953 /* Rotate is just modulo 32. */
7954 *amountp &= 31;
7955 else if (*amountp != (*amountp & 31))
7957 if (code == ASHIFT)
7958 mnem = "lsr";
7959 *amountp = 32;
7962 /* Shifts of 0 are no-ops. */
7963 if (*amountp == 0)
7964 return NULL;
7967 return mnem;
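/* Examples (illustrative): for (ashift x (const_int 3)) shift_op
   returns "asl" with *AMOUNTP = 3; (mult x (const_int 8)) also
   yields "asl" with *AMOUNTP = 3; a shift by a register yields the
   mnemonic with *AMOUNTP = -1.  */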
7970 /* Obtain the shift count corresponding to POWER, a power of two. */
7972 static HOST_WIDE_INT
7973 int_log2 (HOST_WIDE_INT power)
7975 HOST_WIDE_INT shift = 0;
7977 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
7979 if (shift > 31)
7980 abort ();
7981 shift++;
7984 return shift;
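/* For example, int_log2 (8) == 3.  Strictly, the loop returns the
   index of the lowest set bit, and aborts only if POWER has no set
   bit within its low 32 bits.  */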
7987 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
7988 /bin/as is horribly restrictive. */
7989 #define MAX_ASCII_LEN 51
7991 void
7992 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
7994 int i;
7995 int len_so_far = 0;
7997 fputs ("\t.ascii\t\"", stream);
7999 for (i = 0; i < len; i++)
8001 int c = p[i];
8003 if (len_so_far >= MAX_ASCII_LEN)
8005 fputs ("\"\n\t.ascii\t\"", stream);
8006 len_so_far = 0;
8009 switch (c)
8011 case TARGET_TAB:
8012 fputs ("\\t", stream);
8013 len_so_far += 2;
8014 break;
8016 case TARGET_FF:
8017 fputs ("\\f", stream);
8018 len_so_far += 2;
8019 break;
8021 case TARGET_BS:
8022 fputs ("\\b", stream);
8023 len_so_far += 2;
8024 break;
8026 case TARGET_CR:
8027 fputs ("\\r", stream);
8028 len_so_far += 2;
8029 break;
8031 case TARGET_NEWLINE:
8032 fputs ("\\n", stream);
8033 c = p [i + 1];
8034 if ((c >= ' ' && c <= '~')
8035 || c == TARGET_TAB)
8036 /* This is a good place for a line break. */
8037 len_so_far = MAX_ASCII_LEN;
8038 else
8039 len_so_far += 2;
8040 break;
8042 case '\"':
8043 case '\\':
8044 putc ('\\', stream);
8045 len_so_far++;
8046 /* Drop through. */
8048 default:
8049 if (c >= ' ' && c <= '~')
8051 putc (c, stream);
8052 len_so_far++;
8054 else
8056 fprintf (stream, "\\%03o", c);
8057 len_so_far += 4;
8059 break;
8063 fputs ("\"\n", stream);
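/* Example of the emitted directives (illustrative): a 60-character
   string of 'a's comes out as two pseudo-ops,

        .ascii  "aaaa...a"    @ first 51 characters
        .ascii  "aaaaaaaaa"   @ remaining 9 characters

   since len_so_far is reset each time it reaches MAX_ASCII_LEN.  */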
8066 /* Compute the register save mask for registers 0 through 12
8067 inclusive. This code is used by both arm_compute_save_reg_mask
8068 and arm_compute_initial_elimination_offset. */
8069 static unsigned long
8070 arm_compute_save_reg0_reg12_mask (void)
8072 unsigned long func_type = arm_current_func_type ();
8073 unsigned int save_reg_mask = 0;
8074 unsigned int reg;
8076 if (IS_INTERRUPT (func_type))
8078 unsigned int max_reg;
8079 /* Interrupt functions must not corrupt any registers,
8080 even call clobbered ones. If this is a leaf function
8081 we can just examine the registers used by the RTL, but
8082 otherwise we have to assume that whatever function is
8083 called might clobber anything, and so we have to save
8084 all the call-clobbered registers as well. */
8085 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8086 /* FIQ handlers have registers r8 - r12 banked, so
8087 we only need to check r0 - r7. Normal ISRs only
8088 bank r14 and r15, so we must check up to r12.
8089 r13 is the stack pointer which is always preserved,
8090 so we do not need to consider it here. */
8091 max_reg = 7;
8092 else
8093 max_reg = 12;
8095 for (reg = 0; reg <= max_reg; reg++)
8096 if (regs_ever_live[reg]
8097 || (! current_function_is_leaf && call_used_regs [reg]))
8098 save_reg_mask |= (1 << reg);
8100 else
8102 /* In the normal case we only need to save those registers
8103 which are call saved and which are used by this function. */
8104 for (reg = 0; reg <= 10; reg++)
8105 if (regs_ever_live[reg] && ! call_used_regs [reg])
8106 save_reg_mask |= (1 << reg);
8108 /* Handle the frame pointer as a special case. */
8109 if (! TARGET_APCS_FRAME
8110 && ! frame_pointer_needed
8111 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8112 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8113 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8115 /* If we aren't loading the PIC register,
8116 don't stack it even though it may be live. */
8117 if (flag_pic
8118 && ! TARGET_SINGLE_PIC_BASE
8119 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8120 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8123 return save_reg_mask;
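/* Illustrative results (assumed register usage): an ordinary function
   whose RTL uses only r4 and r5 yields a mask of 0x30, while a
   non-leaf FIQ handler collects every register in r0 - r7 that is
   either live or call-clobbered, since the handler cannot know what
   a callee might corrupt.  */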
8126 /* Compute a bit mask of which registers need to be
8127 saved on the stack for the current function. */
8129 static unsigned long
8130 arm_compute_save_reg_mask (void)
8132 unsigned int save_reg_mask = 0;
8133 unsigned long func_type = arm_current_func_type ();
8135 if (IS_NAKED (func_type))
8136 /* This should never really happen. */
8137 return 0;
8139 /* If we are creating a stack frame, then we must save the frame pointer,
8140 IP (which will hold the old stack pointer), LR and the PC. */
8141 if (frame_pointer_needed)
8142 save_reg_mask |=
8143 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8144 | (1 << IP_REGNUM)
8145 | (1 << LR_REGNUM)
8146 | (1 << PC_REGNUM);
8148 /* Volatile functions do not return, so there
8149 is no need to save any other registers. */
8150 if (IS_VOLATILE (func_type))
8151 return save_reg_mask;
8153 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8155 /* Decide if we need to save the link register.
8156 Interrupt routines have their own banked link register,
8157 so they never need to save it.
8158 Otherwise if we do not use the link register we do not need to save
8159 it. If we are pushing other registers onto the stack however, we
8160 can save an instruction in the epilogue by pushing the link register
8161 now and then popping it back into the PC. This incurs extra memory
8162 accesses though, so we only do it when optimizing for size, and only
8163 if we know that we will not need a fancy return sequence. */
8164 if (regs_ever_live [LR_REGNUM]
8165 || (save_reg_mask
8166 && optimize_size
8167 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8168 save_reg_mask |= 1 << LR_REGNUM;
8170 if (cfun->machine->lr_save_eliminated)
8171 save_reg_mask &= ~ (1 << LR_REGNUM);
8173 if (TARGET_REALLY_IWMMXT
8174 && ((bit_count (save_reg_mask)
8175 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8177 unsigned int reg;
8179 /* The total number of registers that are going to be pushed
8180 onto the stack is odd. We need to ensure that the stack
8181 is 64-bit aligned before we start to save iWMMXt registers,
8182 and also before we start to create locals. (A local variable
8183 might be a double or long long which we will load/store using
8184 an iWMMXt instruction). Therefore we need to push another
8185 ARM register, so that the stack will be 64-bit aligned. We
8186 try to avoid using the arg registers (r0 - r3) as they might be
8187 used to pass values in a tail call. */
8188 for (reg = 4; reg <= 12; reg++)
8189 if ((save_reg_mask & (1 << reg)) == 0)
8190 break;
8192 if (reg <= 12)
8193 save_reg_mask |= (1 << reg);
8194 else
8196 cfun->machine->sibcall_blocked = 1;
8197 save_reg_mask |= (1 << 3);
8201 return save_reg_mask;
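/* Illustrative example of the iWMMXt padding above (assumed values):
   a function saving {r4, r5, lr} with no pretend args pushes an odd
   count of three registers (12 bytes), so r6 -- the first free
   register above the argument registers -- is added to the mask,
   making the push 16 bytes and keeping the stack 64-bit aligned.  */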
8204 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8205 everything bar the final return instruction. */
8206 const char *
8207 output_return_instruction (rtx operand, int really_return, int reverse)
8209 char conditional[10];
8210 char instr[100];
8211 int reg;
8212 unsigned long live_regs_mask;
8213 unsigned long func_type;
8215 func_type = arm_current_func_type ();
8217 if (IS_NAKED (func_type))
8218 return "";
8220 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8222 /* If this function was declared non-returning, and we have
8223 found a tail call, then we have to trust that the called
8224 function won't return. */
8225 if (really_return)
8227 rtx ops[2];
8229 /* Otherwise, trap an attempted return by aborting. */
8230 ops[0] = operand;
8231 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8232 : "abort");
8233 assemble_external_libcall (ops[1]);
8234 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8237 return "";
8240 if (current_function_calls_alloca && !really_return)
8241 abort ();
8243 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8245 return_used_this_function = 1;
8247 live_regs_mask = arm_compute_save_reg_mask ();
8249 if (live_regs_mask)
8251 const char * return_reg;
8253 /* If we do not have any special requirements for function exit
8254 (eg interworking, or ISR) then we can load the return address
8255 directly into the PC. Otherwise we must load it into LR. */
8256 if (really_return
8257 && ! TARGET_INTERWORK)
8258 return_reg = reg_names[PC_REGNUM];
8259 else
8260 return_reg = reg_names[LR_REGNUM];
8262 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8263 /* There are two possible reasons for the IP register being saved.
8264 Either a stack frame was created, in which case IP contains the
8265 old stack pointer, or an ISR routine corrupted it. If this is an
8266 ISR routine then just restore IP, otherwise restore IP into SP. */
8267 if (! IS_INTERRUPT (func_type))
8269 live_regs_mask &= ~ (1 << IP_REGNUM);
8270 live_regs_mask |= (1 << SP_REGNUM);
8273 /* On some ARM architectures it is faster to use LDR rather than
8274 LDM to load a single register. On other architectures, the
8275 cost is the same. In 26 bit mode, or for exception handlers,
8276 we have to use LDM to load the PC so that the CPSR is also
8277 restored. */
8278 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8280 if (live_regs_mask == (unsigned int)(1 << reg))
8281 break;
8283 if (reg <= LAST_ARM_REGNUM
8284 && (reg != LR_REGNUM
8285 || ! really_return
8286 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
8288 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8289 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8291 else
8293 char *p;
8294 int first = 1;
8296 /* Generate the load multiple instruction to restore the
8297 registers. Note we can get here, even if
8298 frame_pointer_needed is true, but only if sp already
8299 points to the base of the saved core registers. */
8300 if (live_regs_mask & (1 << SP_REGNUM))
8302 unsigned HOST_WIDE_INT stack_adjust =
8303 arm_get_frame_size () + current_function_outgoing_args_size;
8305 if (stack_adjust != 0 && stack_adjust != 4)
8306 abort ();
8308 if (stack_adjust && arm_arch5)
8309 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8310 else
8312 /* If we can't use ldmib (SA110 bug), then try to pop r3
8313 instead. */
8314 if (stack_adjust)
8315 live_regs_mask |= 1 << 3;
8316 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8319 else
8320 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8322 p = instr + strlen (instr);
8324 for (reg = 0; reg <= SP_REGNUM; reg++)
8325 if (live_regs_mask & (1 << reg))
8327 int l = strlen (reg_names[reg]);
8329 if (first)
8330 first = 0;
8331 else
8333 memcpy (p, ", ", 2);
8334 p += 2;
8337 memcpy (p, "%|", 2);
8338 memcpy (p + 2, reg_names[reg], l);
8339 p += l + 2;
8342 if (live_regs_mask & (1 << LR_REGNUM))
8344 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8345 /* Decide if we need to add the ^ symbol to the end of the
8346 register list. This causes the saved condition codes
8347 register to be copied into the current condition codes
8348 register. We do the copy if we are conforming to the 32-bit
8349 ABI and this is an interrupt function, or if we are
8350 conforming to the 26-bit ABI. There is a special case for
8351 the 26-bit ABI however, which is if we are writing back the
8352 stack pointer but not loading the PC. In this case adding
8353 the ^ symbol would create a type 2 LDM instruction, where
8354 writeback is UNPREDICTABLE. We are safe in leaving the ^
8355 character off in this case however, since the actual return
8356 instruction will be a MOVS which will restore the CPSR. */
8357 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
8358 || (! TARGET_APCS_32 && really_return))
8359 strcat (p, "^");
8361 else
8362 strcpy (p, "}");
8365 output_asm_insn (instr, & operand);
8367 /* See if we need to generate an extra instruction to
8368 perform the actual function return. */
8369 if (really_return
8370 && func_type != ARM_FT_INTERWORKED
8371 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8373 /* The return has already been handled
8374 by loading the LR into the PC. */
8375 really_return = 0;
8379 if (really_return)
8381 switch ((int) ARM_FUNC_TYPE (func_type))
8383 case ARM_FT_ISR:
8384 case ARM_FT_FIQ:
8385 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
8386 break;
8388 case ARM_FT_INTERWORKED:
8389 sprintf (instr, "bx%s\t%%|lr", conditional);
8390 break;
8392 case ARM_FT_EXCEPTION:
8393 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
8394 break;
8396 default:
8397 /* ARMv5 implementations always provide BX, so interworking
8398 is the default unless APCS-26 is in use. */
8399 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
8400 sprintf (instr, "bx%s\t%%|lr", conditional);
8401 else
8402 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
8403 conditional, TARGET_APCS_32 ? "" : "s");
8404 break;
8407 output_asm_insn (instr, & operand);
8410 return "";
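/* Examples of the sequences produced (illustrative): a conditional
   return from a normal APCS-32 function that pushed {r4, lr} emits

        ldmeqfd sp!, {r4, pc}

   while the same function compiled for interworking restores LR in
   the ldm instead and returns with a conditional "bx lr".  */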
8413 /* Write the function name into the code section, directly preceding
8414 the function prologue.
8416 Code will be output similar to this:
8418 .ascii "arm_poke_function_name", 0
8419 .align
8421 .word 0xff000000 + (t1 - t0)
8422 arm_poke_function_name
8423 mov ip, sp
8424 stmfd sp!, {fp, ip, lr, pc}
8425 sub fp, ip, #4
8427 When performing a stack backtrace, code can inspect the value
8428 of 'pc' stored at 'fp' + 0. If the trace function then looks
8429 at location pc - 12 and the top 8 bits are set, then we know
8430 that there is a function name embedded immediately preceding this
8431 location, whose length is given by (pc[-3] & 0x00ffffff).
8433 We assume that pc is declared as a pointer to an unsigned long.
8435 It is of no benefit to output the function name if we are assembling
8436 a leaf function. These function types will not contain a stack
8437 backtrace structure, therefore it is not possible to determine the
8438 function name. */
8439 void
8440 arm_poke_function_name (FILE *stream, const char *name)
8442 unsigned long alignlength;
8443 unsigned long length;
8444 rtx x;
8446 length = strlen (name) + 1;
8447 alignlength = ROUND_UP_WORD (length);
8449 ASM_OUTPUT_ASCII (stream, name, length);
8450 ASM_OUTPUT_ALIGN (stream, 2);
8451 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
8452 assemble_aligned_integer (UNITS_PER_WORD, x);
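/* A minimal sketch of the consumer side (illustrative only, not part
   of GCC): a backtrace routine that has fetched the saved 'pc' value
   described above could recover the name with

       unsigned long marker = ((unsigned long *) pc)[-3];
       if ((marker & 0xff000000) == 0xff000000)
         name = (const char *) pc - 12 - (marker & 0x00ffffff);

   since the word emitted above stores the padded name length in its
   low 24 bits.  */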
8455 /* Place some comments into the assembler stream
8456 describing the current function. */
8457 static void
8458 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
8460 unsigned long func_type;
8462 if (!TARGET_ARM)
8464 thumb_output_function_prologue (f, frame_size);
8465 return;
8468 /* Sanity check. */
8469 if (arm_ccfsm_state || arm_target_insn)
8470 abort ();
8472 func_type = arm_current_func_type ();
8474 switch ((int) ARM_FUNC_TYPE (func_type))
8476 default:
8477 case ARM_FT_NORMAL:
8478 break;
8479 case ARM_FT_INTERWORKED:
8480 asm_fprintf (f, "\t%@ Function supports interworking.\n");
8481 break;
8482 case ARM_FT_EXCEPTION_HANDLER:
8483 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
8484 break;
8485 case ARM_FT_ISR:
8486 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
8487 break;
8488 case ARM_FT_FIQ:
8489 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
8490 break;
8491 case ARM_FT_EXCEPTION:
8492 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
8493 break;
8496 if (IS_NAKED (func_type))
8497 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
8499 if (IS_VOLATILE (func_type))
8500 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
8502 if (IS_NESTED (func_type))
8503 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
8505 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
8506 current_function_args_size,
8507 current_function_pretend_args_size, frame_size);
8509 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
8510 frame_pointer_needed,
8511 cfun->machine->uses_anonymous_args);
8513 if (cfun->machine->lr_save_eliminated)
8514 asm_fprintf (f, "\t%@ link register save eliminated.\n");
8516 #ifdef AOF_ASSEMBLER
8517 if (flag_pic)
8518 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
8519 #endif
8521 return_used_this_function = 0;
8524 const char *
8525 arm_output_epilogue (rtx sibling)
8527 int reg;
8528 unsigned long saved_regs_mask;
8529 unsigned long func_type;
8530 /* Floats_offset is the offset from the "virtual" frame. In an APCS
8531 frame that is $fp + 4 for a non-variadic function. */
8532 int floats_offset = 0;
8533 rtx operands[3];
8534 int frame_size = arm_get_frame_size ();
8535 FILE * f = asm_out_file;
8536 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
8537 unsigned int lrm_count = 0;
8538 int really_return = (sibling == NULL);
8540 /* If we have already generated the return instruction
8541 then it is futile to generate anything else. */
8542 if (use_return_insn (FALSE, sibling) && return_used_this_function)
8543 return "";
8545 func_type = arm_current_func_type ();
8547 if (IS_NAKED (func_type))
8548 /* Naked functions don't have epilogues. */
8549 return "";
8551 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8553 rtx op;
8555 /* A volatile function should never return. Call abort. */
8556 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
8557 assemble_external_libcall (op);
8558 output_asm_insn ("bl\t%a0", &op);
8560 return "";
8563 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
8564 && ! really_return)
8565 /* If we are throwing an exception, then we really must
8566 be doing a return, so we can't tail-call. */
8567 abort ();
8569 saved_regs_mask = arm_compute_save_reg_mask ();
8571 if (TARGET_IWMMXT)
8572 lrm_count = bit_count (saved_regs_mask);
8574 /* XXX We should adjust floats_offset for any anonymous args, and then
8575 re-adjust vfp_offset below to compensate. */
8577 /* Compute how far away the floats will be. */
8578 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8579 if (saved_regs_mask & (1 << reg))
8580 floats_offset += 4;
8582 if (frame_pointer_needed)
8584 int vfp_offset = 4;
8586 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
8588 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
8589 if (regs_ever_live[reg] && !call_used_regs[reg])
8591 floats_offset += 12;
8592 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
8593 reg, FP_REGNUM, floats_offset - vfp_offset);
8596 else
8598 int start_reg = LAST_ARM_FP_REGNUM;
8600 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
8602 if (regs_ever_live[reg] && !call_used_regs[reg])
8604 floats_offset += 12;
8606 /* We can't unstack more than four registers at once. */
8607 if (start_reg - reg == 3)
8609 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
8610 reg, FP_REGNUM, floats_offset - vfp_offset);
8611 start_reg = reg - 1;
8614 else
8616 if (reg != start_reg)
8617 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
8618 reg + 1, start_reg - reg,
8619 FP_REGNUM, floats_offset - vfp_offset);
8620 start_reg = reg - 1;
8624 /* Just in case the last register checked also needs unstacking. */
8625 if (reg != start_reg)
8626 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
8627 reg + 1, start_reg - reg,
8628 FP_REGNUM, floats_offset - vfp_offset);
8631 if (TARGET_IWMMXT)
8633 /* The frame pointer is guaranteed to be non-double-word aligned.
8634 This is because it is set to (old_stack_pointer - 4) and the
8635 old_stack_pointer was double word aligned. Thus the offset to
8636 the iWMMXt registers to be loaded must also be non-double-word
8637 sized, so that the resultant address *is* double-word aligned.
8638 We can ignore floats_offset since that was already included in
8639 the live_regs_mask. */
8640 lrm_count += (lrm_count % 2 ? 2 : 1);
8642 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
8643 if (regs_ever_live[reg] && !call_used_regs[reg])
8645 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
8646 reg, FP_REGNUM, lrm_count * 4);
8647 lrm_count += 2;
8651 /* saved_regs_mask should contain the IP, which at the time of stack
8652 frame generation actually contains the old stack pointer. So a
8653 quick way to unwind the stack is just pop the IP register directly
8654 into the stack pointer. */
8655 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
8656 abort ();
8657 saved_regs_mask &= ~ (1 << IP_REGNUM);
8658 saved_regs_mask |= (1 << SP_REGNUM);
8660 /* There are two registers left in saved_regs_mask - LR and PC. We
8661 only need to restore the LR register (the return address), but to
8662 save time we can load it directly into the PC, unless we need a
8663 special function exit sequence, or we are not really returning. */
8664 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
8665 /* Delete the LR from the register mask, so that the LR on
8666 the stack is loaded into the PC in the register mask. */
8667 saved_regs_mask &= ~ (1 << LR_REGNUM);
8668 else
8669 saved_regs_mask &= ~ (1 << PC_REGNUM);
8671 /* We must use SP as the base register, because SP is one of the
8672 registers being restored. If an interrupt or page fault
8673 happens in the ldm instruction, the SP might or might not
8674 have been restored. That would be bad, as then SP will no
8675 longer indicate the safe area of stack, and we can get stack
8676 corruption. Using SP as the base register means that it will
8677 be reset correctly to the original value, should an interrupt
8678 occur. If the stack pointer already points at the right
8679 place, then omit the subtraction. */
8680 if (((frame_size + current_function_outgoing_args_size + floats_offset)
8681 != 4 * (1 + (int) bit_count (saved_regs_mask)))
8682 || current_function_calls_alloca)
8683 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
8684 4 * bit_count (saved_regs_mask));
8685 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
8687 if (IS_INTERRUPT (func_type))
8688 /* Interrupt handlers will have pushed the
8689 IP onto the stack, so restore it now. */
8690 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
8692 else
8694 /* Restore stack pointer if necessary. */
8695 if (frame_size + current_function_outgoing_args_size != 0)
8697 operands[0] = operands[1] = stack_pointer_rtx;
8698 operands[2] = GEN_INT (frame_size
8699 + current_function_outgoing_args_size);
8700 output_add_immediate (operands);
8703 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
8705 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
8706 if (regs_ever_live[reg] && !call_used_regs[reg])
8707 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
8708 reg, SP_REGNUM);
8710 else
8712 int start_reg = FIRST_ARM_FP_REGNUM;
8714 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
8716 if (regs_ever_live[reg] && !call_used_regs[reg])
8718 if (reg - start_reg == 3)
8720 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
8721 start_reg, SP_REGNUM);
8722 start_reg = reg + 1;
8725 else
8727 if (reg != start_reg)
8728 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
8729 start_reg, reg - start_reg,
8730 SP_REGNUM);
8732 start_reg = reg + 1;
8736 /* Just in case the last register checked also needs unstacking. */
8737 if (reg != start_reg)
8738 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
8739 start_reg, reg - start_reg, SP_REGNUM);
8742 if (TARGET_IWMMXT)
8743 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
8744 if (regs_ever_live[reg] && !call_used_regs[reg])
8745 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
8747 /* If we can, restore the LR into the PC. */
8748 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8749 && really_return
8750 && current_function_pretend_args_size == 0
8751 && saved_regs_mask & (1 << LR_REGNUM))
8753 saved_regs_mask &= ~ (1 << LR_REGNUM);
8754 saved_regs_mask |= (1 << PC_REGNUM);
8757 /* Load the registers off the stack. If we only have one register
8758 to load use the LDR instruction - it is faster. */
8759 if (saved_regs_mask == (1 << LR_REGNUM))
8761 /* The exception handler ignores the LR, so we do
8762 not really need to load it off the stack. */
8763 if (eh_ofs)
8764 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
8765 else
8766 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
8768 else if (saved_regs_mask)
8770 if (saved_regs_mask & (1 << SP_REGNUM))
8771 /* Note - write back to the stack register is not enabled
8772 (ie "ldmfd sp!..."). We know that the stack pointer is
8773 in the list of registers and if we add writeback the
8774 instruction becomes UNPREDICTABLE. */
8775 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
8776 else
8777 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
8780 if (current_function_pretend_args_size)
8782 /* Unwind the pre-pushed regs. */
8783 operands[0] = operands[1] = stack_pointer_rtx;
8784 operands[2] = GEN_INT (current_function_pretend_args_size);
8785 output_add_immediate (operands);
8789 if (! really_return
8790 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
8791 && current_function_pretend_args_size == 0
8792 && saved_regs_mask & (1 << PC_REGNUM)))
8793 return "";
8795 /* Generate the return instruction. */
8796 switch ((int) ARM_FUNC_TYPE (func_type))
8798 case ARM_FT_EXCEPTION_HANDLER:
8799 /* Even in 26-bit mode we do a mov (rather than a movs)
8800 because we don't have the PSR bits set in the address. */
8801 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
8802 break;
8804 case ARM_FT_ISR:
8805 case ARM_FT_FIQ:
8806 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
8807 break;
8809 case ARM_FT_EXCEPTION:
8810 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8811 break;
8813 case ARM_FT_INTERWORKED:
8814 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
8815 break;
8817 default:
8818 if (frame_pointer_needed)
8819 /* If we used the frame pointer then the return address
8820 will have been loaded off the stack directly into the
8821 PC, so there is no need to issue a MOV instruction
8822 here. */
8824 else if (current_function_pretend_args_size == 0
8825 && (saved_regs_mask & (1 << LR_REGNUM)))
8826 /* Similarly we may have been able to load LR into the PC
8827 even if we did not create a stack frame. */
8829 else if (TARGET_APCS_32)
8830 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8831 else
8832 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
8833 break;
8836 return "";
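/* Example epilogue (illustrative): a frame-pointer function that
   pushed {r4, fp, ip, lr, pc} and has no locals is unwound with

        ldmfd   sp, {r4, fp, sp, pc}

   where the saved IP slot is loaded straight into SP and the saved
   LR slot straight into PC; with locals or outgoing args a
   "sub sp, fp, #..." is emitted first to reposition SP.  */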
8839 static void
8840 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8841 HOST_WIDE_INT frame_size)
8843 if (TARGET_THUMB)
8845 /* ??? Probably not safe to set this here, since it assumes that a
8846 function will be emitted as assembly immediately after we generate
8847 RTL for it. This does not happen for inline functions. */
8848 return_used_this_function = 0;
8850 else
8852 /* We need to take into account any stack-frame rounding. */
8853 frame_size = arm_get_frame_size ();
8855 if (use_return_insn (FALSE, NULL)
8856 && return_used_this_function
8857 && (frame_size + current_function_outgoing_args_size) != 0
8858 && !frame_pointer_needed)
8859 abort ();
8861 /* Reset the ARM-specific per-function variables. */
8862 after_arm_reorg = 0;
8866 /* Generate and emit an insn that we will recognize as a push_multi.
8867 Unfortunately, since this insn does not reflect very well the actual
8868 semantics of the operation, we need to annotate the insn for the benefit
8869 of DWARF2 frame unwind information. */
8870 static rtx
8871 emit_multi_reg_push (int mask)
8873 int num_regs = 0;
8874 int num_dwarf_regs;
8875 int i, j;
8876 rtx par;
8877 rtx dwarf;
8878 int dwarf_par_index;
8879 rtx tmp, reg;
8881 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8882 if (mask & (1 << i))
8883 num_regs++;
8885 if (num_regs == 0 || num_regs > 16)
8886 abort ();
8888 /* We don't record the PC in the dwarf frame information. */
8889 num_dwarf_regs = num_regs;
8890 if (mask & (1 << PC_REGNUM))
8891 num_dwarf_regs--;
8893 /* For the body of the insn we are going to generate an UNSPEC in
8894 parallel with several USEs. This allows the insn to be recognized
8895 by the push_multi pattern in the arm.md file. The insn looks
8896 something like this:
8898 (parallel [
8899 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
8900 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
8901 (use (reg:SI 11 fp))
8902 (use (reg:SI 12 ip))
8903 (use (reg:SI 14 lr))
8904 (use (reg:SI 15 pc))
8907 For the frame note however, we try to be more explicit and actually
8908 show each register being stored into the stack frame, plus a (single)
8909 decrement of the stack pointer. We do it this way in order to be
8910 friendly to the stack unwinding code, which only wants to see a single
8911 stack decrement per instruction. The RTL we generate for the note looks
8912 something like this:
8914 (sequence [
8915 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
8916 (set (mem:SI (reg:SI sp)) (reg:SI r4))
8917 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
8918 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
8919 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
8922 This sequence is used both by the code to support stack unwinding for
8923 exception handlers and the code to generate dwarf2 frame debugging. */
8925 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
8926 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
8927 dwarf_par_index = 1;
8929 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8931 if (mask & (1 << i))
8933 reg = gen_rtx_REG (SImode, i);
8935 XVECEXP (par, 0, 0)
8936 = gen_rtx_SET (VOIDmode,
8937 gen_rtx_MEM (BLKmode,
8938 gen_rtx_PRE_DEC (BLKmode,
8939 stack_pointer_rtx)),
8940 gen_rtx_UNSPEC (BLKmode,
8941 gen_rtvec (1, reg),
8942 UNSPEC_PUSH_MULT));
8944 if (i != PC_REGNUM)
8946 tmp = gen_rtx_SET (VOIDmode,
8947 gen_rtx_MEM (SImode, stack_pointer_rtx),
8948 reg);
8949 RTX_FRAME_RELATED_P (tmp) = 1;
8950 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
8951 dwarf_par_index++;
8954 break;
8958 for (j = 1, i++; j < num_regs; i++)
8960 if (mask & (1 << i))
8962 reg = gen_rtx_REG (SImode, i);
8964 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
8966 if (i != PC_REGNUM)
8968 tmp = gen_rtx_SET (VOIDmode,
8969 gen_rtx_MEM (SImode,
8970 plus_constant (stack_pointer_rtx,
8971 4 * j)),
8972 reg);
8973 RTX_FRAME_RELATED_P (tmp) = 1;
8974 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
8977 j++;
8981 par = emit_insn (par);
8983 tmp = gen_rtx_SET (SImode,
8984 stack_pointer_rtx,
8985 gen_rtx_PLUS (SImode,
8986 stack_pointer_rtx,
8987 GEN_INT (-4 * num_regs)));
8988 RTX_FRAME_RELATED_P (tmp) = 1;
8989 XVECEXP (dwarf, 0, 0) = tmp;
8991 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8992 REG_NOTES (par));
8993 return par;
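/* Illustrative call (mirrors what arm_expand_prologue does below): to
   push r4, fp, ip, lr and pc in a single stmfd the caller passes

       emit_multi_reg_push ((1 << 4) | (1 << 11) | (1 << 12)
                            | (1 << 14) | (1 << 15));

   and the attached note then describes one 20-byte stack decrement
   plus four stores -- the PC slot is not recorded, as noted above.  */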
8996 static rtx
8997 emit_sfm (int base_reg, int count)
8999 rtx par;
9000 rtx dwarf;
9001 rtx tmp, reg;
9002 int i;
9004 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9005 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9007 reg = gen_rtx_REG (XFmode, base_reg++);
9009 XVECEXP (par, 0, 0)
9010 = gen_rtx_SET (VOIDmode,
9011 gen_rtx_MEM (BLKmode,
9012 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9013 gen_rtx_UNSPEC (BLKmode,
9014 gen_rtvec (1, reg),
9015 UNSPEC_PUSH_MULT));
9016 tmp
9017 = gen_rtx_SET (VOIDmode,
9018 gen_rtx_MEM (XFmode,
9019 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9020 reg);
9021 RTX_FRAME_RELATED_P (tmp) = 1;
9022 XVECEXP (dwarf, 0, count - 1) = tmp;
9024 for (i = 1; i < count; i++)
9026 reg = gen_rtx_REG (XFmode, base_reg++);
9027 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9029 tmp = gen_rtx_SET (VOIDmode,
9030 gen_rtx_MEM (XFmode,
9031 gen_rtx_PRE_DEC (BLKmode,
9032 stack_pointer_rtx)),
9033 reg);
9034 RTX_FRAME_RELATED_P (tmp) = 1;
9035 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9038 par = emit_insn (par);
9039 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9040 REG_NOTES (par));
9041 return par;
9044 /* Compute the distance from register FROM to register TO.
9045 These can be the arg pointer (26), the soft frame pointer (25),
9046 the stack pointer (13) or the hard frame pointer (11).
9047 Typical stack layout looks like this:
9049        old stack pointer -> |    |
9050                              ----
9051                             |    |  \
9052                             |    |    saved arguments for
9053                             |    |    vararg functions
9054                             |    |  /
9055                              ----
9056    hard FP & arg pointer -> |    |  \
9057                             |    |    stack
9058                             |    |    frame
9059                             |    |  /
9060                              ----
9061                             |    |  \
9062                             |    |    call saved
9063                             |    |    registers
9064       soft frame pointer -> |    |  /
9065                              ----
9066                             |    |  \
9067                             |    |    local
9068                             |    |    variables
9069                             |    |  /
9070                              ----
9071                             |    |  \
9072                             |    |    outgoing
9073                             |    |    arguments
9074    current stack pointer -> |    |  /
9077 For a given function some or all of these stack components
9078 may not be needed, giving rise to the possibility of
9079 eliminating some of the registers.
9081 The values returned by this function must reflect the behavior
9082 of arm_expand_prologue() and arm_compute_save_reg_mask().
9084 The sign of the number returned reflects the direction of stack
9085 growth, so the values are positive for all eliminations except
9086 from the soft frame pointer to the hard frame pointer. */
9087 unsigned int
9088 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9090 unsigned int local_vars = arm_get_frame_size ();
9091 unsigned int outgoing_args = current_function_outgoing_args_size;
9092 unsigned int stack_frame;
9093 unsigned int call_saved_registers;
9094 unsigned long func_type;
9096 func_type = arm_current_func_type ();
9098 /* Volatile functions never return, so there is
9099 no need to save call saved registers. */
9100 call_saved_registers = 0;
9101 if (! IS_VOLATILE (func_type))
9103 unsigned int reg_mask;
9104 unsigned int reg;
9106 /* Make sure that we compute which registers will be saved
9107 on the stack using the same algorithm that is used by
9108 the prologue creation code. */
9109 reg_mask = arm_compute_save_reg_mask ();
9111 /* Now count the number of bits set in save_reg_mask.
9112 If we have already counted the registers in the stack
9113 frame, do not count them again. Non call-saved registers
9114 might be saved in the call-save area of the stack, if
9115 doing so will preserve the stack's alignment. Hence we
9116 must count them here. For each set bit we need 4 bytes
9117 of stack space. */
9118 if (frame_pointer_needed)
9119 reg_mask &= 0x07ff;
9120 call_saved_registers += 4 * bit_count (reg_mask);
9122 /* If the hard floating point registers are going to be
9123 used then they must be saved on the stack as well.
9124 Each register occupies 12 bytes of stack space. */
9125 for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
9126 if (regs_ever_live[reg] && ! call_used_regs[reg])
9127 call_saved_registers += 12;
9129 if (TARGET_REALLY_IWMMXT)
9130 /* Check for the call-saved iWMMXt registers. */
9131 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9132 if (regs_ever_live[reg] && ! call_used_regs [reg])
9133 call_saved_registers += 8;
9136 /* The stack frame contains 4 registers - the old frame pointer,
9137 the old stack pointer, the return address and PC of the start
9138 of the function. */
9139 stack_frame = frame_pointer_needed ? 16 : 0;
9141 /* OK, now we have enough information to compute the distances.
9142 There must be an entry in these switch tables for each pair
9143 of registers in ELIMINABLE_REGS, even if some of the entries
9144 seem to be redundant or useless. */
9145 switch (from)
9147 case ARG_POINTER_REGNUM:
9148 switch (to)
9150 case THUMB_HARD_FRAME_POINTER_REGNUM:
9151 return 0;
9153 case FRAME_POINTER_REGNUM:
9154 /* This is the reverse of the soft frame pointer
9155 to hard frame pointer elimination below. */
9156 if (call_saved_registers == 0 && stack_frame == 0)
9157 return 0;
9158 return (call_saved_registers + stack_frame - 4);
9160 case ARM_HARD_FRAME_POINTER_REGNUM:
9161 /* If there is no stack frame then the hard
9162 frame pointer and the arg pointer coincide. */
9163 if (stack_frame == 0 && call_saved_registers != 0)
9164 return 0;
9165 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9166 return (frame_pointer_needed
9167 && current_function_needs_context
9168 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9170 case STACK_POINTER_REGNUM:
9171 /* If nothing has been pushed on the stack at all
9172 then this will return -4. This *is* correct! */
9173 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
9175 default:
9176 abort ();
9178 break;
9180 case FRAME_POINTER_REGNUM:
9181 switch (to)
9183 case THUMB_HARD_FRAME_POINTER_REGNUM:
9184 return 0;
9186 case ARM_HARD_FRAME_POINTER_REGNUM:
9187 /* The hard frame pointer points to the top entry in the
9188 stack frame. The soft frame pointer to the bottom entry
9189 in the stack frame. If there is no stack frame at all,
9190 then they are identical. */
9191 if (call_saved_registers == 0 && stack_frame == 0)
9192 return 0;
9193 return - (call_saved_registers + stack_frame - 4);
9195 case STACK_POINTER_REGNUM:
9196 return local_vars + outgoing_args;
9198 default:
9199 abort ();
9201 break;
9203 default:
9204 /* You cannot eliminate from the stack pointer.
9205 In theory you could eliminate from the hard frame
9206 pointer to the stack pointer, but this will never
9207 happen, since if a stack frame is not needed the
9208 hard frame pointer will never be used. */
9209 abort ();
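/* Worked example (illustrative): a frame-pointer function saving
   {r4, fp, ip, lr, pc} with 8 bytes of locals and no outgoing args
   has call_saved_registers = 4 (the 0x07ff mask leaves only r4) and
   stack_frame = 16, so ARG_POINTER -> STACK_POINTER is
   4 + 16 + 8 + 0 - 4 = 24, FRAME_POINTER -> STACK_POINTER is 8, and
   FRAME_POINTER -> ARM_HARD_FRAME_POINTER is -(4 + 16 - 4) = -16.  */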
9213 /* Calculate the size of the stack frame, taking into account any
9214 padding that is required to ensure stack-alignment. */
9215 HOST_WIDE_INT
9216 arm_get_frame_size (void)
9218 int regno;
9220 int base_size = ROUND_UP_WORD (get_frame_size ());
9221 int entry_size = 0;
9222 unsigned long func_type = arm_current_func_type ();
9223 int leaf;
9225 if (! TARGET_ARM)
9226 abort();
9228 if (! TARGET_ATPCS)
9229 return base_size;
9231 /* We need to know if we are a leaf function. Unfortunately, it
9232 is possible to be called after start_sequence has been called,
9233 which causes get_insns to return the insns for the sequence,
9234 not the function, which will cause leaf_function_p to return
9235 the incorrect result.
9237 To work around this, we cache the computed frame size. This
9238 works because we will only be calling RTL expanders that need
9239 to know about leaf functions once reload has completed, and the
9240 frame size cannot be changed after that time, so we can safely
9241 use the cached value. */
9243 if (reload_completed)
9244 return cfun->machine->frame_size;
9246 leaf = leaf_function_p ();
9248 /* A leaf function does not need any stack alignment if it has nothing
9249 on the stack. */
9250 if (leaf && base_size == 0)
9252 cfun->machine->frame_size = 0;
9253 return 0;
9256 /* We know that SP will be word aligned on entry, and we must
9257 preserve that condition at any subroutine call. But those are
9258 the only constraints. */
9260 /* Space for variadic functions. */
9261 if (current_function_pretend_args_size)
9262 entry_size += current_function_pretend_args_size;
9264 /* Space for saved registers. */
9265 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
9267 /* Space for saved FPA registers. */
9268 if (! IS_VOLATILE (func_type))
9270 for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
9271 if (regs_ever_live[regno] && ! call_used_regs[regno])
9272 entry_size += 12;
9275 if (TARGET_REALLY_IWMMXT)
9277 /* Check for the call-saved iWMMXt registers. */
9278 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
9279 if (regs_ever_live [regno] && ! call_used_regs [regno])
9280 entry_size += 8;
9283 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
9284 base_size += 4;
9285 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
9286 abort ();
9288 cfun->machine->frame_size = base_size;
9290 return base_size;
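/* Illustrative ATPCS rounding: with entry_size = 12 (three saved
   registers), base_size = 8 and no outgoing args the total is 20,
   not a multiple of 8, so base_size is bumped to 12; the repeated
   test then sees 24, which passes.  */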
9293 /* Generate the prologue instructions for entry into an ARM function. */
9294 void
9295 arm_expand_prologue (void)
9297 int reg;
9298 rtx amount;
9299 rtx insn;
9300 rtx ip_rtx;
9301 unsigned long live_regs_mask;
9302 unsigned long func_type;
9303 int fp_offset = 0;
9304 int saved_pretend_args = 0;
9305 unsigned int args_to_push;
9307 func_type = arm_current_func_type ();
9309 /* Naked functions don't have prologues. */
9310 if (IS_NAKED (func_type))
9311 return;
9313 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
9314 args_to_push = current_function_pretend_args_size;
9316 /* Compute which registers we will have to save onto the stack. */
9317 live_regs_mask = arm_compute_save_reg_mask ();
9319 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
9321 if (frame_pointer_needed)
9323 if (IS_INTERRUPT (func_type))
9325 /* Interrupt functions must not corrupt any registers.
9326 Creating a frame pointer however, corrupts the IP
9327 register, so we must push it first. */
9328 insn = emit_multi_reg_push (1 << IP_REGNUM);
9330 /* Do not set RTX_FRAME_RELATED_P on this insn.
9331 The dwarf stack unwinding code only wants to see one
9332 stack decrement per function, and this is not it. If
9333 this instruction is labeled as being part of the frame
9334 creation sequence then dwarf2out_frame_debug_expr will
9335 abort when it encounters the assignment of IP to FP
9336 later on, since the use of SP here establishes SP as
9337 the CFA register and not IP.
9339 Anyway this instruction is not really part of the stack
9340 frame creation although it is part of the prologue. */
9342 else if (IS_NESTED (func_type))
9344 /* The static chain register is the same as the IP register
9345 used as a scratch register during stack frame creation.
9346 To get around this, we need to find somewhere to store IP
9347 whilst the frame is being created. We try the following
9348 places in order:
9350 1. The last argument register.
9351 2. A slot on the stack above the frame. (This only
9352 works if the function is not a varargs function).
9353 3. Register r3, after pushing the argument registers
9354 onto the stack.
9356 Note - we only need to tell the dwarf2 backend about the SP
9357 adjustment in the second variant; the static chain register
9358 doesn't need to be unwound, as it doesn't contain a value
9359 inherited from the caller. */
9361 if (regs_ever_live[3] == 0)
9363 insn = gen_rtx_REG (SImode, 3);
9364 insn = gen_rtx_SET (SImode, insn, ip_rtx);
9365 insn = emit_insn (insn);
9367 else if (args_to_push == 0)
9369 rtx dwarf;
9370 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
9371 insn = gen_rtx_MEM (SImode, insn);
9372 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
9373 insn = emit_insn (insn);
9375 fp_offset = 4;
9377 /* Just tell the dwarf backend that we adjusted SP. */
9378 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9379 gen_rtx_PLUS (SImode, stack_pointer_rtx,
9380 GEN_INT (-fp_offset)));
9381 RTX_FRAME_RELATED_P (insn) = 1;
9382 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
9383 dwarf, REG_NOTES (insn));
9385 else
9387 /* Store the args on the stack. */
9388 if (cfun->machine->uses_anonymous_args)
9389 insn = emit_multi_reg_push
9390 ((0xf0 >> (args_to_push / 4)) & 0xf);
9391 else
9392 insn = emit_insn
9393 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9394 GEN_INT (- args_to_push)));
9396 RTX_FRAME_RELATED_P (insn) = 1;
9398 saved_pretend_args = 1;
9399 fp_offset = args_to_push;
9400 args_to_push = 0;
9402 /* Now reuse r3 to preserve IP. */
9403 insn = gen_rtx_REG (SImode, 3);
9404 insn = gen_rtx_SET (SImode, insn, ip_rtx);
9405 (void) emit_insn (insn);
9409 if (fp_offset)
9411 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
9412 insn = gen_rtx_SET (SImode, ip_rtx, insn);
9414 else
9415 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
9417 insn = emit_insn (insn);
9418 RTX_FRAME_RELATED_P (insn) = 1;
9421 if (args_to_push)
9423 /* Push the argument registers, or reserve space for them. */
9424 if (cfun->machine->uses_anonymous_args)
9425 insn = emit_multi_reg_push
9426 ((0xf0 >> (args_to_push / 4)) & 0xf);
9427 else
9428 insn = emit_insn
9429 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9430 GEN_INT (- args_to_push)));
9431 RTX_FRAME_RELATED_P (insn) = 1;
9434 /* If this is an interrupt service routine, and the link register
9435 is going to be pushed, and we are not creating a stack frame,
9436 (which would involve an extra push of IP and a pop in the epilogue)
9437 subtracting four from LR now will mean that the function return
9438 can be done with a single instruction. */
9439 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
9440 && (live_regs_mask & (1 << LR_REGNUM)) != 0
9441 && ! frame_pointer_needed)
9442 emit_insn (gen_rtx_SET (SImode,
9443 gen_rtx_REG (SImode, LR_REGNUM),
9444 gen_rtx_PLUS (SImode,
9445 gen_rtx_REG (SImode, LR_REGNUM),
9446 GEN_INT (-4))));
9448 if (live_regs_mask)
9450 insn = emit_multi_reg_push (live_regs_mask);
9451 RTX_FRAME_RELATED_P (insn) = 1;
9454 if (TARGET_IWMMXT)
9455 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9456 if (regs_ever_live[reg] && ! call_used_regs [reg])
9458 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
9459 insn = gen_rtx_MEM (V2SImode, insn);
9460 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
9461 gen_rtx_REG (V2SImode, reg)));
9462 RTX_FRAME_RELATED_P (insn) = 1;
9465 if (! IS_VOLATILE (func_type))
9467 /* Save any floating point call-saved registers used by this
9468 function. */
9469 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9471 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
9472 if (regs_ever_live[reg] && !call_used_regs[reg])
9474 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
9475 insn = gen_rtx_MEM (XFmode, insn);
9476 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
9477 gen_rtx_REG (XFmode, reg)));
9478 RTX_FRAME_RELATED_P (insn) = 1;
9481 else
9483 int start_reg = LAST_ARM_FP_REGNUM;
9485 for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
9487 if (regs_ever_live[reg] && !call_used_regs[reg])
9489 if (start_reg - reg == 3)
9491 insn = emit_sfm (reg, 4);
9492 RTX_FRAME_RELATED_P (insn) = 1;
9493 start_reg = reg - 1;
9496 else
9498 if (start_reg != reg)
9500 insn = emit_sfm (reg + 1, start_reg - reg);
9501 RTX_FRAME_RELATED_P (insn) = 1;
9503 start_reg = reg - 1;
9507 if (start_reg != reg)
9509 insn = emit_sfm (reg + 1, start_reg - reg);
9510 RTX_FRAME_RELATED_P (insn) = 1;
9515 if (frame_pointer_needed)
9517 /* Create the new frame pointer. */
9518 insn = GEN_INT (-(4 + args_to_push + fp_offset));
9519 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
9520 RTX_FRAME_RELATED_P (insn) = 1;
9522 if (IS_NESTED (func_type))
9524 /* Recover the static chain register. */
9525 if (regs_ever_live [3] == 0
9526 || saved_pretend_args)
9527 insn = gen_rtx_REG (SImode, 3);
9528 else /* if (current_function_pretend_args_size == 0) */
9530 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
9531 GEN_INT (4));
9532 insn = gen_rtx_MEM (SImode, insn);
9535 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
9536 /* Add a USE to stop propagate_one_insn() from barfing. */
9537 emit_insn (gen_prologue_use (ip_rtx));
9541 amount = GEN_INT (-(arm_get_frame_size ()
9542 + current_function_outgoing_args_size));
9544 if (amount != const0_rtx)
9546 /* This add can produce multiple insns for a large constant, so we
9547 need to get tricky. */
9548 rtx last = get_last_insn ();
9549 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
9550 amount));
9551 do
9553 last = last ? NEXT_INSN (last) : get_insns ();
9554 RTX_FRAME_RELATED_P (last) = 1;
9556 while (last != insn);
9558 /* If the frame pointer is needed, emit a special barrier that
9559 will prevent the scheduler from moving stores to the frame
9560 before the stack adjustment. */
9561 if (frame_pointer_needed)
9562 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
9563 hard_frame_pointer_rtx));
9566 /* If we are profiling, make sure no instructions are scheduled before
9567 the call to mcount. Similarly if the user has requested no
9568 scheduling in the prolog. */
9569 if (current_function_profile || TARGET_NO_SCHED_PRO)
9570 emit_insn (gen_blockage ());
9572 /* If the link register is being kept alive, with the return address in it,
9573 then make sure that it does not get reused by the ce2 pass. */
9574 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
9576 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
9577 cfun->machine->lr_save_eliminated = 1;
9581 /* If CODE is 'd', then the X is a condition operand and the instruction
9582 should only be executed if the condition is true.
9583 If CODE is 'D', then the X is a condition operand and the instruction
9584 should only be executed if the condition is false: however, if the mode
9585 of the comparison is CCFPEmode, then always execute the instruction -- we
9586 do this because in these circumstances !GE does not necessarily imply LT;
9587 in these cases the instruction pattern will take care to make sure that
9588 an instruction containing %d will follow, thereby undoing the effects of
9589 doing this instruction unconditionally.
9590 If CODE is 'N' then X is a floating point operand that must be negated
9591 before output.
9592 If CODE is 'B' then output a bitwise inverted value of X (a const int).
9593 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
9594 void
9595 arm_print_operand (FILE *stream, rtx x, int code)
9597 switch (code)
9599 case '@':
9600 fputs (ASM_COMMENT_START, stream);
9601 return;
9603 case '_':
9604 fputs (user_label_prefix, stream);
9605 return;
9607 case '|':
9608 fputs (REGISTER_PREFIX, stream);
9609 return;
9611 case '?':
9612 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
9614 if (TARGET_THUMB || current_insn_predicate != NULL)
9615 abort ();
9617 fputs (arm_condition_codes[arm_current_cc], stream);
9619 else if (current_insn_predicate)
9621 enum arm_cond_code code;
9623 if (TARGET_THUMB)
9624 abort ();
9626 code = get_arm_condition_code (current_insn_predicate);
9627 fputs (arm_condition_codes[code], stream);
9629 return;
9631 case 'N':
9633 REAL_VALUE_TYPE r;
9634 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
9635 r = REAL_VALUE_NEGATE (r);
9636 fprintf (stream, "%s", fp_const_from_val (&r));
9638 return;
9640 case 'B':
9641 if (GET_CODE (x) == CONST_INT)
9643 HOST_WIDE_INT val;
9644 val = ARM_SIGN_EXTEND (~INTVAL (x));
9645 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
9647 else
9649 putc ('~', stream);
9650 output_addr_const (stream, x);
9652 return;
9654 case 'i':
9655 fprintf (stream, "%s", arithmetic_instr (x, 1));
9656 return;
9658 /* Truncate Cirrus shift counts. */
9659 case 's':
9660 if (GET_CODE (x) == CONST_INT)
9662 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
9663 return;
9665 arm_print_operand (stream, x, 0);
9666 return;
9668 case 'I':
9669 fprintf (stream, "%s", arithmetic_instr (x, 0));
9670 return;
9672 case 'S':
9674 HOST_WIDE_INT val;
9675 const char * shift = shift_op (x, &val);
9677 if (shift)
9679 fprintf (stream, ", %s ", shift_op (x, &val));
9680 if (val == -1)
9681 arm_print_operand (stream, XEXP (x, 1), 0);
9682 else
9683 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
9686 return;
9688 /* An explanation of the 'Q', 'R' and 'H' register operands:
9690 In a pair of registers containing a DI or DF value the 'Q'
9691 operand returns the register number of the register containing
9692 the least significant part of the value. The 'R' operand returns
9693 the register number of the register containing the most
9694 significant part of the value.
9696 The 'H' operand returns the higher of the two register numbers.
9697 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
9698 same as the 'Q' operand, since the most significant part of the
9699 value is held in the lower number register. The reverse is true
9700 on systems where WORDS_BIG_ENDIAN is false.
9702 The purpose of these operands is to distinguish between cases
9703 where the endian-ness of the values is important (for example
9704 when they are added together), and cases where the endian-ness
9705 is irrelevant, but the order of register operations is important.
9706 For example when loading a value from memory into a register
9707 pair, the endian-ness does not matter. Provided that the value
9708 from the lower memory address is put into the lower numbered
9709 register, and the value from the higher address is put into the
9710 higher numbered register, the load will work regardless of whether
9711 the value being loaded is big-wordian or little-wordian. The
9712 order of the two register loads can matter however, if the address
9713 of the memory location is actually held in one of the registers
9714 being overwritten by the load. */
9715 case 'Q':
9716 if (REGNO (x) > LAST_ARM_REGNUM)
9717 abort ();
9718 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
9719 return;
9721 case 'R':
9722 if (REGNO (x) > LAST_ARM_REGNUM)
9723 abort ();
9724 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
9725 return;
9727 case 'H':
9728 if (REGNO (x) > LAST_ARM_REGNUM)
9729 abort ();
9730 asm_fprintf (stream, "%r", REGNO (x) + 1);
9731 return;
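/* Example (illustrative): for a DImode value held in r0/r1 on a
   target where WORDS_BIG_ENDIAN is false, %Q prints r0, %R prints r1
   and %H also prints r1; with WORDS_BIG_ENDIAN true, %Q and %H both
   print r1 while %R prints r0.  */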
9733 case 'm':
9734 asm_fprintf (stream, "%r",
9735 GET_CODE (XEXP (x, 0)) == REG
9736 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
9737 return;
9739 case 'M':
9740 asm_fprintf (stream, "{%r-%r}",
9741 REGNO (x),
9742 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
9743 return;
9745 case 'd':
9746 /* CONST_TRUE_RTX means always -- that's the default. */
9747 if (x == const_true_rtx)
9748 return;
9750 fputs (arm_condition_codes[get_arm_condition_code (x)],
9751 stream);
9752 return;
9754 case 'D':
9755 /* CONST_TRUE_RTX means not always -- ie never. We shouldn't ever
9756 want to do that. */
9757 if (x == const_true_rtx)
9758 abort ();
9760 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
9761 (get_arm_condition_code (x))],
9762 stream);
9763 return;
9765 /* Cirrus registers can be accessed in a variety of ways:
9766 single floating point (f)
9767 double floating point (d)
9768 32bit integer (fx)
9769 64bit integer (dx). */
9770 case 'W': /* Cirrus register in F mode. */
9771 case 'X': /* Cirrus register in D mode. */
9772 case 'Y': /* Cirrus register in FX mode. */
9773 case 'Z': /* Cirrus register in DX mode. */
9774 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
9775 abort ();
9777 fprintf (stream, "mv%s%s",
9778 code == 'W' ? "f"
9779 : code == 'X' ? "d"
9780 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
9782 return;
9784 /* Print cirrus register in the mode specified by the register's mode. */
9785 case 'V':
9787 int mode = GET_MODE (x);
9789 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
9790 abort ();
9792 fprintf (stream, "mv%s%s",
9793 mode == DFmode ? "d"
9794 : mode == SImode ? "fx"
9795 : mode == DImode ? "dx"
9796 : "f", reg_names[REGNO (x)] + 2);
9798 return;
9801 case 'U':
9802 if (GET_CODE (x) != REG
9803 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
9804 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
9805 /* Bad value for wCG register number. */
9806 abort ();
9807 else
9808 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
9809 return;
9811 /* Print an iWMMXt control register name. */
9812 case 'w':
9813 if (GET_CODE (x) != CONST_INT
9814 || INTVAL (x) < 0
9815 || INTVAL (x) >= 16)
9816 /* Bad value for wC register number. */
9817 abort ();
9818 else
9820 static const char * wc_reg_names [16] =
9822 "wCID", "wCon", "wCSSF", "wCASF",
9823 "wC4", "wC5", "wC6", "wC7",
9824 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
9825 "wC12", "wC13", "wC14", "wC15"
9828 fprintf (stream, wc_reg_names [INTVAL (x)]);
9830 return;
9832 default:
9833 if (x == 0)
9834 abort ();
9836 if (GET_CODE (x) == REG)
9837 asm_fprintf (stream, "%r", REGNO (x));
9838 else if (GET_CODE (x) == MEM)
9840 output_memory_reference_mode = GET_MODE (x);
9841 output_address (XEXP (x, 0));
9843 else if (GET_CODE (x) == CONST_DOUBLE)
9844 fprintf (stream, "#%s", fp_immediate_constant (x));
9845 else if (GET_CODE (x) == NEG)
9846 abort (); /* This should never happen now. */
9847 else
9849 fputc ('#', stream);
9850 output_addr_const (stream, x);
9855 #ifndef AOF_ASSEMBLER
9856 /* Target hook for assembling integer objects. The ARM version needs to
9857 handle word-sized values specially. */
9858 static bool
9859 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
9861 if (size == UNITS_PER_WORD && aligned_p)
9863 fputs ("\t.word\t", asm_out_file);
9864 output_addr_const (asm_out_file, x);
9866 /* Mark symbols as position independent. We only do this in the
9867 .text segment, not in the .data segment. */
9868 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
9869 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
9871 if (GET_CODE (x) == SYMBOL_REF
9872 && (CONSTANT_POOL_ADDRESS_P (x)
9873 || SYMBOL_REF_LOCAL_P (x)))
9874 fputs ("(GOTOFF)", asm_out_file);
9875 else if (GET_CODE (x) == LABEL_REF)
9876 fputs ("(GOTOFF)", asm_out_file);
9877 else
9878 fputs ("(GOT)", asm_out_file);
9880 fputc ('\n', asm_out_file);
9881 return true;
9884 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
9886 int i, units;
9888 if (GET_CODE (x) != CONST_VECTOR)
9889 abort ();
9891 units = CONST_VECTOR_NUNITS (x);
9893 switch (GET_MODE (x))
9895 case V2SImode: size = 4; break;
9896 case V4HImode: size = 2; break;
9897 case V8QImode: size = 1; break;
9898 default:
9899 abort ();
9902 for (i = 0; i < units; i++)
9904 rtx elt;
9906 elt = CONST_VECTOR_ELT (x, i);
9907 assemble_integer
9908 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
9911 return true;
9914 return default_assemble_integer (x, size, aligned_p);
9916 #endif
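/* Example of the word-sized output above (illustrative): under -fpic,
   a constant-pool reference to a local symbol is emitted as

        .word   foo(GOTOFF)

   while a reference to a preemptible global symbol becomes

        .word   bar(GOT)

   where foo and bar are placeholder names.  */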
9918 /* A finite state machine takes care of noticing whether or not instructions
9919 can be conditionally executed, and thus decrease execution time and code
9920 size by deleting branch instructions. The fsm is controlled by
9921 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
9923 /* The states of the fsm controlling condition codes are:
9924 0: normal, do nothing special
9925 1: make ASM_OUTPUT_OPCODE not output this instruction
9926 2: make ASM_OUTPUT_OPCODE not output this instruction
9927 3: make instructions conditional
9928 4: make instructions conditional
9930 State transitions (state->state by whom under condition):
9931 0 -> 1 final_prescan_insn if the `target' is a label
9932 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
9933 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
9934 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
9935 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
9936 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
9937 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
9938 (the target insn is arm_target_insn).
9940 If the jump clobbers the conditions then we use states 2 and 4.
9942 A similar thing can be done with conditional return insns.
9944 XXX In case the `target' is an unconditional branch, this conditionalising
9945 of the instructions always reduces code size, but not always execution
9946 time. But then, I want to reduce the code size to somewhere near what
9947 /bin/cc produces. */
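/* A sketch of the transformation (hypothetical assembly, for
   illustration only -- not actual compiler output):

       cmp     r0, #0
       beq     .L1             @ branch suppressed by ASM_OUTPUT_OPCODE
       add     r1, r1, #1      @ emitted as: addne r1, r1, #1
   .L1:

   The conditional branch disappears and the instruction it would have
   skipped is executed under the inverse condition.  */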
9949 /* Returns the index of the ARM condition code string in
9950 `arm_condition_codes'. COMPARISON should be an rtx like
9951 `(eq (...) (...))'. */
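/* For example (illustrative only): an EQ comparison in CCmode maps
   straight to ARM_EQ, whereas a GT comparison in CC_SWPmode maps to
   ARM_LT, because the operands were swapped when the condition flags
   were set.  */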
9952 static enum arm_cond_code
9953 get_arm_condition_code (rtx comparison)
9955 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
9956 int code;
9957 enum rtx_code comp_code = GET_CODE (comparison);
9959 if (GET_MODE_CLASS (mode) != MODE_CC)
9960 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
9961 XEXP (comparison, 1));
9963 switch (mode)
9965 case CC_DNEmode: code = ARM_NE; goto dominance;
9966 case CC_DEQmode: code = ARM_EQ; goto dominance;
9967 case CC_DGEmode: code = ARM_GE; goto dominance;
9968 case CC_DGTmode: code = ARM_GT; goto dominance;
9969 case CC_DLEmode: code = ARM_LE; goto dominance;
9970 case CC_DLTmode: code = ARM_LT; goto dominance;
9971 case CC_DGEUmode: code = ARM_CS; goto dominance;
9972 case CC_DGTUmode: code = ARM_HI; goto dominance;
9973 case CC_DLEUmode: code = ARM_LS; goto dominance;
9974 case CC_DLTUmode: code = ARM_CC;
9976 dominance:
9977 if (comp_code != EQ && comp_code != NE)
9978 abort ();
9980 if (comp_code == EQ)
9981 return ARM_INVERSE_CONDITION_CODE (code);
9982 return code;
9984 case CC_NOOVmode:
9985 switch (comp_code)
9987 case NE: return ARM_NE;
9988 case EQ: return ARM_EQ;
9989 case GE: return ARM_PL;
9990 case LT: return ARM_MI;
9991 default: abort ();
9994 case CC_Zmode:
9995 switch (comp_code)
9997 case NE: return ARM_NE;
9998 case EQ: return ARM_EQ;
9999 default: abort ();
10002 case CC_Nmode:
10003 switch (comp_code)
10005 case NE: return ARM_MI;
10006 case EQ: return ARM_PL;
10007 default: abort ();
10010 case CCFPEmode:
10011 case CCFPmode:
10012 /* These encodings assume that AC=1 in the FPA system control
10013 byte. This allows us to handle all cases except UNEQ and
10014 LTGT. */
10015 switch (comp_code)
10017 case GE: return ARM_GE;
10018 case GT: return ARM_GT;
10019 case LE: return ARM_LS;
10020 case LT: return ARM_MI;
10021 case NE: return ARM_NE;
10022 case EQ: return ARM_EQ;
10023 case ORDERED: return ARM_VC;
10024 case UNORDERED: return ARM_VS;
10025 case UNLT: return ARM_LT;
10026 case UNLE: return ARM_LE;
10027 case UNGT: return ARM_HI;
10028 case UNGE: return ARM_PL;
10029 /* UNEQ and LTGT do not have a representation. */
10030 case UNEQ: /* Fall through. */
10031 case LTGT: /* Fall through. */
10032 default: abort ();
10035 case CC_SWPmode:
10036 switch (comp_code)
10038 case NE: return ARM_NE;
10039 case EQ: return ARM_EQ;
10040 case GE: return ARM_LE;
10041 case GT: return ARM_LT;
10042 case LE: return ARM_GE;
10043 case LT: return ARM_GT;
10044 case GEU: return ARM_LS;
10045 case GTU: return ARM_CC;
10046 case LEU: return ARM_CS;
10047 case LTU: return ARM_HI;
10048 default: abort ();
10051 case CC_Cmode:
10052 switch (comp_code)
10054 case LTU: return ARM_CS;
10055 case GEU: return ARM_CC;
10056 default: abort ();
10059 case CCmode:
10060 switch (comp_code)
10062 case NE: return ARM_NE;
10063 case EQ: return ARM_EQ;
10064 case GE: return ARM_GE;
10065 case GT: return ARM_GT;
10066 case LE: return ARM_LE;
10067 case LT: return ARM_LT;
10068 case GEU: return ARM_CS;
10069 case GTU: return ARM_HI;
10070 case LEU: return ARM_LS;
10071 case LTU: return ARM_CC;
10072 default: abort ();
10075 default: abort ();
10078 abort ();
10081 void
10082 arm_final_prescan_insn (rtx insn)
10084 /* BODY will hold the body of INSN. */
10085 rtx body = PATTERN (insn);
10087 /* This will be 1 if trying to repeat the trick, and things need to be
10088 reversed if it appears to fail. */
10089 int reverse = 0;
10091 /* A nonzero JUMP_CLOBBERS implies that the condition codes are
10092 clobbered if the branch is taken, even if the rtl suggests otherwise.  It also
10093 means that we have to grub around within the jump expression to find
10094 out what the conditions are when the jump isn't taken. */
10095 int jump_clobbers = 0;
10097 /* If we start with a return insn, we only succeed if we find another one. */
10098 int seeking_return = 0;
10100 /* START_INSN will hold the insn from where we start looking. This is the
10101 first insn after the following code_label if REVERSE is true. */
10102 rtx start_insn = insn;
10104 /* If in state 4, check if the target branch is reached, in order to
10105 change back to state 0. */
10106 if (arm_ccfsm_state == 4)
10108 if (insn == arm_target_insn)
10110 arm_target_insn = NULL;
10111 arm_ccfsm_state = 0;
10113 return;
10116 /* If in state 3, it is possible to repeat the trick, if this insn is an
10117 unconditional branch to a label, and immediately following this branch
10118 is the previous target label which is only used once, and the label this
10119 branch jumps to is not too far off. */
10120 if (arm_ccfsm_state == 3)
10122 if (simplejump_p (insn))
10124 start_insn = next_nonnote_insn (start_insn);
10125 if (GET_CODE (start_insn) == BARRIER)
10127 /* XXX Isn't this always a barrier? */
10128 start_insn = next_nonnote_insn (start_insn);
10130 if (GET_CODE (start_insn) == CODE_LABEL
10131 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10132 && LABEL_NUSES (start_insn) == 1)
10133 reverse = TRUE;
10134 else
10135 return;
10137 else if (GET_CODE (body) == RETURN)
10139 start_insn = next_nonnote_insn (start_insn);
10140 if (GET_CODE (start_insn) == BARRIER)
10141 start_insn = next_nonnote_insn (start_insn);
10142 if (GET_CODE (start_insn) == CODE_LABEL
10143 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10144 && LABEL_NUSES (start_insn) == 1)
10146 reverse = TRUE;
10147 seeking_return = 1;
10149 else
10150 return;
10152 else
10153 return;
10156 if (arm_ccfsm_state != 0 && !reverse)
10157 abort ();
10158 if (GET_CODE (insn) != JUMP_INSN)
10159 return;
10161 /* This jump might be paralleled with a clobber of the condition codes;
10162 the jump should always come first.  */
10163 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10164 body = XVECEXP (body, 0, 0);
10166 if (reverse
10167 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10168 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10170 int insns_skipped;
10171 int fail = FALSE, succeed = FALSE;
10172 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10173 int then_not_else = TRUE;
10174 rtx this_insn = start_insn, label = 0;
10176 /* If the jump cannot be done with one instruction, we cannot
10177 conditionally execute the instruction in the inverse case. */
10178 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10180 jump_clobbers = 1;
10181 return;
10184 /* Register the insn jumped to. */
10185 if (reverse)
10187 if (!seeking_return)
10188 label = XEXP (SET_SRC (body), 0);
10190 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10191 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10192 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10194 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10195 then_not_else = FALSE;
10197 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10198 seeking_return = 1;
10199 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10201 seeking_return = 1;
10202 then_not_else = FALSE;
10204 else
10205 abort ();
10207 /* See how many insns this branch skips, and what kind of insns. If all
10208 insns are okay, and the label or unconditional branch to the same
10209 label is not too far away, succeed. */
10210 for (insns_skipped = 0;
10211 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10213 rtx scanbody;
10215 this_insn = next_nonnote_insn (this_insn);
10216 if (!this_insn)
10217 break;
10219 switch (GET_CODE (this_insn))
10221 case CODE_LABEL:
10222 /* Succeed if it is the target label, otherwise fail since
10223 control falls in from somewhere else. */
10224 if (this_insn == label)
10226 if (jump_clobbers)
10228 arm_ccfsm_state = 2;
10229 this_insn = next_nonnote_insn (this_insn);
10231 else
10232 arm_ccfsm_state = 1;
10233 succeed = TRUE;
10235 else
10236 fail = TRUE;
10237 break;
10239 case BARRIER:
10240 /* Succeed if the following insn is the target label.
10241 Otherwise fail.
10242 If return insns are used then the last insn in a function
10243 will be a barrier. */
10244 this_insn = next_nonnote_insn (this_insn);
10245 if (this_insn && this_insn == label)
10247 if (jump_clobbers)
10249 arm_ccfsm_state = 2;
10250 this_insn = next_nonnote_insn (this_insn);
10252 else
10253 arm_ccfsm_state = 1;
10254 succeed = TRUE;
10256 else
10257 fail = TRUE;
10258 break;
10260 case CALL_INSN:
10261 /* If using 32-bit addresses the cc is not preserved over
10262 calls. */
10263 if (TARGET_APCS_32)
10265 /* Succeed if the following insn is the target label,
10266 or if the following two insns are a barrier and
10267 the target label. */
10268 this_insn = next_nonnote_insn (this_insn);
10269 if (this_insn && GET_CODE (this_insn) == BARRIER)
10270 this_insn = next_nonnote_insn (this_insn);
10272 if (this_insn && this_insn == label
10273 && insns_skipped < max_insns_skipped)
10275 if (jump_clobbers)
10277 arm_ccfsm_state = 2;
10278 this_insn = next_nonnote_insn (this_insn);
10280 else
10281 arm_ccfsm_state = 1;
10282 succeed = TRUE;
10284 else
10285 fail = TRUE;
10287 break;
10289 case JUMP_INSN:
10290 /* If this is an unconditional branch to the same label, succeed.
10291 If it is to another label, do nothing. If it is conditional,
10292 fail. */
10293 /* XXX Probably, the tests for SET and the PC are
10294 unnecessary. */
10296 scanbody = PATTERN (this_insn);
10297 if (GET_CODE (scanbody) == SET
10298 && GET_CODE (SET_DEST (scanbody)) == PC)
10300 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
10301 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
10303 arm_ccfsm_state = 2;
10304 succeed = TRUE;
10306 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
10307 fail = TRUE;
10309 /* Fail if a conditional return is undesirable (eg on a
10310 StrongARM), but still allow this if optimizing for size. */
10311 else if (GET_CODE (scanbody) == RETURN
10312 && !use_return_insn (TRUE, NULL)
10313 && !optimize_size)
10314 fail = TRUE;
10315 else if (GET_CODE (scanbody) == RETURN
10316 && seeking_return)
10318 arm_ccfsm_state = 2;
10319 succeed = TRUE;
10321 else if (GET_CODE (scanbody) == PARALLEL)
10323 switch (get_attr_conds (this_insn))
10325 case CONDS_NOCOND:
10326 break;
10327 default:
10328 fail = TRUE;
10329 break;
10332 else
10333 fail = TRUE; /* Unrecognized jump (eg epilogue). */
10335 break;
10337 case INSN:
10338 /* Instructions using or affecting the condition codes make it
10339 fail. */
10340 scanbody = PATTERN (this_insn);
10341 if (!(GET_CODE (scanbody) == SET
10342 || GET_CODE (scanbody) == PARALLEL)
10343 || get_attr_conds (this_insn) != CONDS_NOCOND)
10344 fail = TRUE;
10346 /* A conditional Cirrus instruction must be followed by
10347 a non-Cirrus instruction.  However, since this
10348 function conditionalizes instructions, and since
10349 shorten_branches () has already been called by the
10350 time we get here, we cannot insert instructions
10351 (nops); so, to be safe, we refuse to conditionalize
10352 Cirrus instructions at all.  */
10353 if (GET_CODE (scanbody) != USE
10354 && GET_CODE (scanbody) != CLOBBER
10355 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
10356 fail = TRUE;
10357 break;
10359 default:
10360 break;
10363 if (succeed)
10365 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
10366 arm_target_label = CODE_LABEL_NUMBER (label);
10367 else if (seeking_return || arm_ccfsm_state == 2)
10369 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
10371 this_insn = next_nonnote_insn (this_insn);
10372 if (this_insn && (GET_CODE (this_insn) == BARRIER
10373 || GET_CODE (this_insn) == CODE_LABEL))
10374 abort ();
10376 if (!this_insn)
10378 /* Oh, dear!  We ran off the end... give up.  */
10379 recog (PATTERN (insn), insn, NULL);
10380 arm_ccfsm_state = 0;
10381 arm_target_insn = NULL;
10382 return;
10384 arm_target_insn = this_insn;
10386 else
10387 abort ();
10388 if (jump_clobbers)
10390 if (reverse)
10391 abort ();
10392 arm_current_cc =
10393 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
10394 0), 0), 1));
10395 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
10396 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10397 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
10398 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10400 else
10402 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
10403 what it was. */
10404 if (!reverse)
10405 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
10406 0));
10409 if (reverse || then_not_else)
10410 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10413 /* Restore recog_data (getting the attributes of other insns can
10414 destroy this array, but final.c assumes that it remains intact
10415 across this call; since the insn has already been recognized we
10416 call recog directly).  */
10417 recog (PATTERN (insn), insn, NULL);
10421 /* Returns true if REGNO is a valid register
10422 for holding a quantity of type MODE.  */
10423 int
10424 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
10426 if (GET_MODE_CLASS (mode) == MODE_CC)
10427 return regno == CC_REGNUM;
10429 if (TARGET_THUMB)
10430 /* For the Thumb we only allow values bigger than SImode in
10431 registers 0 - 6, so that there is always a second low
10432 register available to hold the upper part of the value.
10433 We probably ought to ensure that the register is the
10434 start of an even numbered register pair. */
10435 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
10437 if (IS_CIRRUS_REGNUM (regno))
10438 /* We have outlawed SI values in Cirrus registers because they
10439 reside in the lower 32 bits, but SF values reside in the
10440 upper 32 bits. This causes gcc all sorts of grief. We can't
10441 even split the registers into pairs because Cirrus SI values
10442 get sign-extended to 64 bits -- aldyh.  */
10443 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
10445 if (IS_IWMMXT_GR_REGNUM (regno))
10446 return mode == SImode;
10448 if (IS_IWMMXT_REGNUM (regno))
10449 return VALID_IWMMXT_REG_MODE (mode);
10451 if (regno <= LAST_ARM_REGNUM)
10452 /* We allow any value to be stored in the general registers. */
10453 return 1;
10455 if ( regno == FRAME_POINTER_REGNUM
10456 || regno == ARG_POINTER_REGNUM)
10457 /* We only allow integers in the fake hard registers. */
10458 return GET_MODE_CLASS (mode) == MODE_INT;
10460 /* The only registers left are the FPA registers
10461 which we only allow to hold FP values. */
10462 return GET_MODE_CLASS (mode) == MODE_FLOAT
10463 && regno >= FIRST_ARM_FP_REGNUM
10464 && regno <= LAST_ARM_FP_REGNUM;
10467 enum reg_class
10468 arm_regno_class (int regno)
10470 if (TARGET_THUMB)
10472 if (regno == STACK_POINTER_REGNUM)
10473 return STACK_REG;
10474 if (regno == CC_REGNUM)
10475 return CC_REG;
10476 if (regno < 8)
10477 return LO_REGS;
10478 return HI_REGS;
10481 if ( regno <= LAST_ARM_REGNUM
10482 || regno == FRAME_POINTER_REGNUM
10483 || regno == ARG_POINTER_REGNUM)
10484 return GENERAL_REGS;
10486 if (regno == CC_REGNUM)
10487 return NO_REGS;
10489 if (IS_CIRRUS_REGNUM (regno))
10490 return CIRRUS_REGS;
10492 if (IS_IWMMXT_REGNUM (regno))
10493 return IWMMXT_REGS;
10495 if (IS_IWMMXT_GR_REGNUM (regno))
10496 return IWMMXT_GR_REGS;
10498 return FPA_REGS;
10501 /* Handle a special case when computing the offset
10502 of an argument from the frame pointer. */
10503 int
10504 arm_debugger_arg_offset (int value, rtx addr)
10506 rtx insn;
10508 /* We are only interested if dbxout_parms() failed to compute the offset. */
10509 if (value != 0)
10510 return 0;
10512 /* We can only cope with the case where the address is held in a register. */
10513 if (GET_CODE (addr) != REG)
10514 return 0;
10516 /* If we are using the frame pointer to point at the argument, then
10517 an offset of 0 is correct. */
10518 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
10519 return 0;
10521 /* If we are using the stack pointer to point at the
10522 argument, then an offset of 0 is correct. */
10523 if ((TARGET_THUMB || !frame_pointer_needed)
10524 && REGNO (addr) == SP_REGNUM)
10525 return 0;
10527 /* Oh dear. The argument is pointed to by a register rather
10528 than being held in a register, or being stored at a known
10529 offset from the frame pointer. Since GDB only understands
10530 those two kinds of argument we must translate the address
10531 held in the register into an offset from the frame pointer.
10532 We do this by searching through the insns for the function
10533 looking to see where this register gets its value. If the
10534 register is initialized from the frame pointer plus an offset
10535 then we are in luck and we can continue, otherwise we give up.
10537 This code is exercised by producing debugging information
10538 for a function with arguments like this:
10540 double func (double a, double b, int c, double d) {return d;}
10542 Without this code the stab for parameter 'd' will be set to
10543 an offset of 0 from the frame pointer, rather than 8. */
10545 /* The if() statement says:
10547 If the insn is a normal instruction
10548 and if the insn is setting the value in a register
10549 and if the register being set is the register holding the address of the argument
10550 and if the address is computed by an addition
10551 that involves adding to a register
10552 which is the frame pointer
10553 a constant integer
10555 then... */
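/* Schematically, the insn being searched for has the shape

       (set (reg Rn)
            (plus (reg HARD_FRAME_POINTER_REGNUM) (const_int N)))

   where Rn is the register holding the argument's address; the register
   and offset names here are illustrative only.  */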
10557 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10559 if ( GET_CODE (insn) == INSN
10560 && GET_CODE (PATTERN (insn)) == SET
10561 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
10562 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
10563 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
10564 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
10565 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
10568 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
10570 break;
10574 if (value == 0)
10576 debug_rtx (addr);
10577 warning ("unable to compute real location of stacked parameter");
10578 value = 8; /* XXX magic hack */
10581 return value;
10584 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
10585 do \
10587 if ((MASK) & insn_flags) \
10588 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
10590 while (0)
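/* So, for example,

       def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                     ARM_BUILTIN_WZERO);

   registers the builtin only when the target's insn_flags include
   FL_IWMMXT, and expands to nothing otherwise.  */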
10592 struct builtin_description
10594 const unsigned int mask;
10595 const enum insn_code icode;
10596 const char * const name;
10597 const enum arm_builtins code;
10598 const enum rtx_code comparison;
10599 const unsigned int flag;
10602 static const struct builtin_description bdesc_2arg[] =
10604 #define IWMMXT_BUILTIN(code, string, builtin) \
10605 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
10606 ARM_BUILTIN_##builtin, 0, 0 },
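/* Each use of this macro expands to one table entry; for instance
   IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) becomes

       { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
         ARM_BUILTIN_WADDB, 0, 0 },  */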
10608 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
10609 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
10610 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
10611 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
10612 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
10613 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
10614 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
10615 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
10616 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
10617 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
10618 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
10619 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
10620 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
10621 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
10622 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
10623 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
10624 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
10625 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
10626 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
10627 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsh", WMULSH)
10628 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmuluh", WMULUH)
10629 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
10630 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
10631 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
10632 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
10633 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
10634 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
10635 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
10636 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
10637 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
10638 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
10639 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
10640 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
10641 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
10642 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
10643 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
10644 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
10645 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
10646 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
10647 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
10648 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
10649 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
10650 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
10651 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
10652 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
10653 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
10654 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
10655 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
10656 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
10657 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
10658 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
10659 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
10660 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
10661 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
10662 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
10663 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
10664 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
10665 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
10667 #define IWMMXT_BUILTIN2(code, builtin) \
10668 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
10670 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
10671 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
10672 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
10673 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
10674 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
10675 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
10676 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
10677 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
10678 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
10679 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
10680 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
10681 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
10682 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
10683 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
10684 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
10685 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
10686 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
10687 IWMMXT_BUILTIN2 (lshrdi3, WSRLDI)
10688 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
10689 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
10690 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
10691 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
10692 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
10693 IWMMXT_BUILTIN2 (ashrdi3, WSRADI)
10694 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
10695 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
10696 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
10697 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
10698 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
10699 IWMMXT_BUILTIN2 (rordi3, WRORDI)
10700 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
10701 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
10704 static const struct builtin_description bdesc_1arg[] =
10706 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
10707 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
10708 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
10709 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
10710 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
10711 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
10712 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
10713 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
10714 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
10715 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
10716 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
10717 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
10718 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
10719 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
10720 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
10721 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
10722 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
10723 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
10726 /* Set up all the iWMMXt builtins. This is
10727 not called if TARGET_IWMMXT is zero. */
10729 static void
10730 arm_init_iwmmxt_builtins (void)
10732 const struct builtin_description * d;
10733 size_t i;
10734 tree endlink = void_list_node;
10736 tree int_ftype_int
10737 = build_function_type (integer_type_node,
10738 tree_cons (NULL_TREE, integer_type_node, endlink));
10739 tree v8qi_ftype_v8qi_v8qi_int
10740 = build_function_type (V8QI_type_node,
10741 tree_cons (NULL_TREE, V8QI_type_node,
10742 tree_cons (NULL_TREE, V8QI_type_node,
10743 tree_cons (NULL_TREE,
10744 integer_type_node,
10745 endlink))));
10746 tree v4hi_ftype_v4hi_int
10747 = build_function_type (V4HI_type_node,
10748 tree_cons (NULL_TREE, V4HI_type_node,
10749 tree_cons (NULL_TREE, integer_type_node,
10750 endlink)));
10751 tree v2si_ftype_v2si_int
10752 = build_function_type (V2SI_type_node,
10753 tree_cons (NULL_TREE, V2SI_type_node,
10754 tree_cons (NULL_TREE, integer_type_node,
10755 endlink)));
10756 tree v2si_ftype_di_di
10757 = build_function_type (V2SI_type_node,
10758 tree_cons (NULL_TREE, long_long_integer_type_node,
10759 tree_cons (NULL_TREE, long_long_integer_type_node,
10760 endlink)));
10761 tree di_ftype_di_int
10762 = build_function_type (long_long_integer_type_node,
10763 tree_cons (NULL_TREE, long_long_integer_type_node,
10764 tree_cons (NULL_TREE, integer_type_node,
10765 endlink)));
10766 tree di_ftype_di_int_int
10767 = build_function_type (long_long_integer_type_node,
10768 tree_cons (NULL_TREE, long_long_integer_type_node,
10769 tree_cons (NULL_TREE, integer_type_node,
10770 tree_cons (NULL_TREE,
10771 integer_type_node,
10772 endlink))));
10773 tree int_ftype_v8qi
10774 = build_function_type (integer_type_node,
10775 tree_cons (NULL_TREE, V8QI_type_node,
10776 endlink));
10777 tree int_ftype_v4hi
10778 = build_function_type (integer_type_node,
10779 tree_cons (NULL_TREE, V4HI_type_node,
10780 endlink));
10781 tree int_ftype_v2si
10782 = build_function_type (integer_type_node,
10783 tree_cons (NULL_TREE, V2SI_type_node,
10784 endlink));
10785 tree int_ftype_v8qi_int
10786 = build_function_type (integer_type_node,
10787 tree_cons (NULL_TREE, V8QI_type_node,
10788 tree_cons (NULL_TREE, integer_type_node,
10789 endlink)));
10790 tree int_ftype_v4hi_int
10791 = build_function_type (integer_type_node,
10792 tree_cons (NULL_TREE, V4HI_type_node,
10793 tree_cons (NULL_TREE, integer_type_node,
10794 endlink)));
10795 tree int_ftype_v2si_int
10796 = build_function_type (integer_type_node,
10797 tree_cons (NULL_TREE, V2SI_type_node,
10798 tree_cons (NULL_TREE, integer_type_node,
10799 endlink)));
10800 tree v8qi_ftype_v8qi_int_int
10801 = build_function_type (V8QI_type_node,
10802 tree_cons (NULL_TREE, V8QI_type_node,
10803 tree_cons (NULL_TREE, integer_type_node,
10804 tree_cons (NULL_TREE,
10805 integer_type_node,
10806 endlink))));
10807 tree v4hi_ftype_v4hi_int_int
10808 = build_function_type (V4HI_type_node,
10809 tree_cons (NULL_TREE, V4HI_type_node,
10810 tree_cons (NULL_TREE, integer_type_node,
10811 tree_cons (NULL_TREE,
10812 integer_type_node,
10813 endlink))));
10814 tree v2si_ftype_v2si_int_int
10815 = build_function_type (V2SI_type_node,
10816 tree_cons (NULL_TREE, V2SI_type_node,
10817 tree_cons (NULL_TREE, integer_type_node,
10818 tree_cons (NULL_TREE,
10819 integer_type_node,
10820 endlink))));
10821 /* Miscellaneous. */
10822 tree v8qi_ftype_v4hi_v4hi
10823 = build_function_type (V8QI_type_node,
10824 tree_cons (NULL_TREE, V4HI_type_node,
10825 tree_cons (NULL_TREE, V4HI_type_node,
10826 endlink)));
10827 tree v4hi_ftype_v2si_v2si
10828 = build_function_type (V4HI_type_node,
10829 tree_cons (NULL_TREE, V2SI_type_node,
10830 tree_cons (NULL_TREE, V2SI_type_node,
10831 endlink)));
10832 tree v2si_ftype_v4hi_v4hi
10833 = build_function_type (V2SI_type_node,
10834 tree_cons (NULL_TREE, V4HI_type_node,
10835 tree_cons (NULL_TREE, V4HI_type_node,
10836 endlink)));
10837 tree v2si_ftype_v8qi_v8qi
10838 = build_function_type (V2SI_type_node,
10839 tree_cons (NULL_TREE, V8QI_type_node,
10840 tree_cons (NULL_TREE, V8QI_type_node,
10841 endlink)));
10842 tree v4hi_ftype_v4hi_di
10843 = build_function_type (V4HI_type_node,
10844 tree_cons (NULL_TREE, V4HI_type_node,
10845 tree_cons (NULL_TREE,
10846 long_long_integer_type_node,
10847 endlink)));
10848 tree v2si_ftype_v2si_di
10849 = build_function_type (V2SI_type_node,
10850 tree_cons (NULL_TREE, V2SI_type_node,
10851 tree_cons (NULL_TREE,
10852 long_long_integer_type_node,
10853 endlink)));
10854 tree void_ftype_int_int
10855 = build_function_type (void_type_node,
10856 tree_cons (NULL_TREE, integer_type_node,
10857 tree_cons (NULL_TREE, integer_type_node,
10858 endlink)));
10859 tree di_ftype_void
10860 = build_function_type (long_long_unsigned_type_node, endlink);
10861 tree di_ftype_v8qi
10862 = build_function_type (long_long_integer_type_node,
10863 tree_cons (NULL_TREE, V8QI_type_node,
10864 endlink));
10865 tree di_ftype_v4hi
10866 = build_function_type (long_long_integer_type_node,
10867 tree_cons (NULL_TREE, V4HI_type_node,
10868 endlink));
10869 tree di_ftype_v2si
10870 = build_function_type (long_long_integer_type_node,
10871 tree_cons (NULL_TREE, V2SI_type_node,
10872 endlink));
10873 tree v2si_ftype_v4hi
10874 = build_function_type (V2SI_type_node,
10875 tree_cons (NULL_TREE, V4HI_type_node,
10876 endlink));
10877 tree v4hi_ftype_v8qi
10878 = build_function_type (V4HI_type_node,
10879 tree_cons (NULL_TREE, V8QI_type_node,
10880 endlink));
10882 tree di_ftype_di_v4hi_v4hi
10883 = build_function_type (long_long_unsigned_type_node,
10884 tree_cons (NULL_TREE,
10885 long_long_unsigned_type_node,
10886 tree_cons (NULL_TREE, V4HI_type_node,
10887 tree_cons (NULL_TREE,
10888 V4HI_type_node,
10889 endlink))));
10891 tree di_ftype_v4hi_v4hi
10892 = build_function_type (long_long_unsigned_type_node,
10893 tree_cons (NULL_TREE, V4HI_type_node,
10894 tree_cons (NULL_TREE, V4HI_type_node,
10895 endlink)));
10897 /* Normal vector binops. */
10898 tree v8qi_ftype_v8qi_v8qi
10899 = build_function_type (V8QI_type_node,
10900 tree_cons (NULL_TREE, V8QI_type_node,
10901 tree_cons (NULL_TREE, V8QI_type_node,
10902 endlink)));
10903 tree v4hi_ftype_v4hi_v4hi
10904 = build_function_type (V4HI_type_node,
10905 tree_cons (NULL_TREE, V4HI_type_node,
10906 tree_cons (NULL_TREE, V4HI_type_node,
10907 endlink)));
10908 tree v2si_ftype_v2si_v2si
10909 = build_function_type (V2SI_type_node,
10910 tree_cons (NULL_TREE, V2SI_type_node,
10911 tree_cons (NULL_TREE, V2SI_type_node,
10912 endlink)));
10913 tree di_ftype_di_di
10914 = build_function_type (long_long_unsigned_type_node,
10915 tree_cons (NULL_TREE, long_long_unsigned_type_node,
10916 tree_cons (NULL_TREE,
10917 long_long_unsigned_type_node,
10918 endlink)));
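/* Each of the above can be read as a C prototype; for instance
   v8qi_ftype_v8qi_v8qi describes "V8QI f (V8QI, V8QI)" and
   di_ftype_di_di describes
   "unsigned long long f (unsigned long long, unsigned long long)".  */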
10920 /* Add all builtins that are more or less simple operations on two
10921 operands. */
10922 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
10924 /* Use one of the operands; the target can have a different mode for
10925 mask-generating compares. */
10926 enum machine_mode mode;
10927 tree type;
10929 if (d->name == 0)
10930 continue;
10932 mode = insn_data[d->icode].operand[1].mode;
10934 switch (mode)
10936 case V8QImode:
10937 type = v8qi_ftype_v8qi_v8qi;
10938 break;
10939 case V4HImode:
10940 type = v4hi_ftype_v4hi_v4hi;
10941 break;
10942 case V2SImode:
10943 type = v2si_ftype_v2si_v2si;
10944 break;
10945 case DImode:
10946 type = di_ftype_di_di;
10947 break;
10949 default:
10950 abort ();
10953 def_mbuiltin (d->mask, d->name, type, d->code);
10956 /* Add the remaining MMX insns with somewhat more complicated types. */
10957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
10958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
10959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
10961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
10962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
10963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
10964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
10965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
10966 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
10968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
10969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
10970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
10971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
10972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
10973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
10975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
10976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
10977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
10978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
10979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
10980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
10982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
10983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
10984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
10985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
10986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
10987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
10989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
10991 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
10992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
10993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
10994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
10996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
10997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
10998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
10999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11000 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11015 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11019 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11023 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11030 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11043 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11048 static void
11049 arm_init_builtins (void)
11051 if (TARGET_REALLY_IWMMXT)
11052 arm_init_iwmmxt_builtins ();
11055 /* Errors in the source file can cause expand_expr to return const0_rtx
11056 where we expect a vector. To avoid crashing, use one of the vector
11057 clear instructions. */
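/* For example, a V8QImode operand that came back as const0_rtx is
   replaced by a fresh V8QImode register, which is cleared by applying
   gen_iwmmxt_clrdi to its DImode subreg.  */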
11059 static rtx
11060 safe_vector_operand (rtx x, enum machine_mode mode)
11062 if (x != const0_rtx)
11063 return x;
11064 x = gen_reg_rtx (mode);
11066 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11067 : gen_rtx_SUBREG (DImode, x, 0)));
11068 return x;
11071 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11073 static rtx
11074 arm_expand_binop_builtin (enum insn_code icode,
11075 tree arglist, rtx target)
11077 rtx pat;
11078 tree arg0 = TREE_VALUE (arglist);
11079 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11080 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11081 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11082 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11083 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11084 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11086 if (VECTOR_MODE_P (mode0))
11087 op0 = safe_vector_operand (op0, mode0);
11088 if (VECTOR_MODE_P (mode1))
11089 op1 = safe_vector_operand (op1, mode1);
11091 if (! target
11092 || GET_MODE (target) != tmode
11093 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11094 target = gen_reg_rtx (tmode);
11096 /* In case the insn wants input operands in modes different from
11097 the result, abort. */
11098 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11099 abort ();
11101 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11102 op0 = copy_to_mode_reg (mode0, op0);
11103 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11104 op1 = copy_to_mode_reg (mode1, op1);
11106 pat = GEN_FCN (icode) (target, op0, op1);
11107 if (! pat)
11108 return 0;
11109 emit_insn (pat);
11110 return target;
11113 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11115 static rtx
11116 arm_expand_unop_builtin (enum insn_code icode,
11117 tree arglist, rtx target, int do_load)
11119 rtx pat;
11120 tree arg0 = TREE_VALUE (arglist);
11121 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11122 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11123 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11125 if (! target
11126 || GET_MODE (target) != tmode
11127 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11128 target = gen_reg_rtx (tmode);
11129 if (do_load)
11130 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11131 else
11133 if (VECTOR_MODE_P (mode0))
11134 op0 = safe_vector_operand (op0, mode0);
11136 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11137 op0 = copy_to_mode_reg (mode0, op0);
11140 pat = GEN_FCN (icode) (target, op0);
11141 if (! pat)
11142 return 0;
11143 emit_insn (pat);
11144 return target;
11147 /* Expand an expression EXP that calls a built-in function,
11148 with result going to TARGET if that's convenient
11149 (and in mode MODE if that's convenient).
11150 SUBTARGET may be used as the target for computing one of EXP's operands.
11151 IGNORE is nonzero if the value is to be ignored. */
11153 static rtx
11154 arm_expand_builtin (tree exp,
11155 rtx target,
11156 rtx subtarget ATTRIBUTE_UNUSED,
11157 enum machine_mode mode ATTRIBUTE_UNUSED,
11158 int ignore ATTRIBUTE_UNUSED)
11160 const struct builtin_description * d;
11161 enum insn_code icode;
11162 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11163 tree arglist = TREE_OPERAND (exp, 1);
11164 tree arg0;
11165 tree arg1;
11166 tree arg2;
11167 rtx op0;
11168 rtx op1;
11169 rtx op2;
11170 rtx pat;
11171 int fcode = DECL_FUNCTION_CODE (fndecl);
11172 size_t i;
11173 enum machine_mode tmode;
11174 enum machine_mode mode0;
11175 enum machine_mode mode1;
11176 enum machine_mode mode2;
11178 switch (fcode)
11180 case ARM_BUILTIN_TEXTRMSB:
11181 case ARM_BUILTIN_TEXTRMUB:
11182 case ARM_BUILTIN_TEXTRMSH:
11183 case ARM_BUILTIN_TEXTRMUH:
11184 case ARM_BUILTIN_TEXTRMSW:
11185 case ARM_BUILTIN_TEXTRMUW:
11186 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11187 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11188 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11189 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11190 : CODE_FOR_iwmmxt_textrmw);
11192 arg0 = TREE_VALUE (arglist);
11193 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11194 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11195 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11196 tmode = insn_data[icode].operand[0].mode;
11197 mode0 = insn_data[icode].operand[1].mode;
11198 mode1 = insn_data[icode].operand[2].mode;
11200 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11201 op0 = copy_to_mode_reg (mode0, op0);
11202 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11204 /* @@@ better error message */
11205 error ("selector must be an immediate");
11206 return gen_reg_rtx (tmode);
11208 if (target == 0
11209 || GET_MODE (target) != tmode
11210 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11211 target = gen_reg_rtx (tmode);
11212 pat = GEN_FCN (icode) (target, op0, op1);
11213 if (! pat)
11214 return 0;
11215 emit_insn (pat);
11216 return target;
11218 case ARM_BUILTIN_TINSRB:
11219 case ARM_BUILTIN_TINSRH:
11220 case ARM_BUILTIN_TINSRW:
11221 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
11222 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
11223 : CODE_FOR_iwmmxt_tinsrw);
11224 arg0 = TREE_VALUE (arglist);
11225 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11226 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11227 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11228 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11229 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11230 tmode = insn_data[icode].operand[0].mode;
11231 mode0 = insn_data[icode].operand[1].mode;
11232 mode1 = insn_data[icode].operand[2].mode;
11233 mode2 = insn_data[icode].operand[3].mode;
11235 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11236 op0 = copy_to_mode_reg (mode0, op0);
11237 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11238 op1 = copy_to_mode_reg (mode1, op1);
11239 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11241 /* @@@ better error message */
11242 error ("selector must be an immediate");
11243 return const0_rtx;
11245 if (target == 0
11246 || GET_MODE (target) != tmode
11247 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11248 target = gen_reg_rtx (tmode);
11249 pat = GEN_FCN (icode) (target, op0, op1, op2);
11250 if (! pat)
11251 return 0;
11252 emit_insn (pat);
11253 return target;
11255 case ARM_BUILTIN_SETWCX:
11256 arg0 = TREE_VALUE (arglist);
11257 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11258 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11259 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11260 emit_insn (gen_iwmmxt_tmcr (op0, op1));
11261 return 0;
11263 case ARM_BUILTIN_GETWCX:
11264 arg0 = TREE_VALUE (arglist);
11265 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11266 target = gen_reg_rtx (SImode);
11267 emit_insn (gen_iwmmxt_tmrc (target, op0));
11268 return target;
11270 case ARM_BUILTIN_WSHUFH:
11271 icode = CODE_FOR_iwmmxt_wshufh;
11272 arg0 = TREE_VALUE (arglist);
11273 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11274 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11275 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11276 tmode = insn_data[icode].operand[0].mode;
11277 mode1 = insn_data[icode].operand[1].mode;
11278 mode2 = insn_data[icode].operand[2].mode;
11280 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
11281 op0 = copy_to_mode_reg (mode1, op0);
11282 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
11284 /* @@@ better error message */
11285 error ("mask must be an immediate");
11286 return const0_rtx;
11288 if (target == 0
11289 || GET_MODE (target) != tmode
11290 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11291 target = gen_reg_rtx (tmode);
11292 pat = GEN_FCN (icode) (target, op0, op1);
11293 if (! pat)
11294 return 0;
11295 emit_insn (pat);
11296 return target;
11298 case ARM_BUILTIN_WSADB:
11299 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
11300 case ARM_BUILTIN_WSADH:
11301 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
11302 case ARM_BUILTIN_WSADBZ:
11303 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
11304 case ARM_BUILTIN_WSADHZ:
11305 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
11307 /* Several three-argument builtins. */
11308 case ARM_BUILTIN_WMACS:
11309 case ARM_BUILTIN_WMACU:
11310 case ARM_BUILTIN_WALIGN:
11311 case ARM_BUILTIN_TMIA:
11312 case ARM_BUILTIN_TMIAPH:
11313 case ARM_BUILTIN_TMIATT:
11314 case ARM_BUILTIN_TMIATB:
11315 case ARM_BUILTIN_TMIABT:
11316 case ARM_BUILTIN_TMIABB:
11317 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
11318 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
11319 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
11320 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
11321 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
11322 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
11323 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
11324 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
11325 : CODE_FOR_iwmmxt_walign);
11326 arg0 = TREE_VALUE (arglist);
11327 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11328 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11329 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11330 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11331 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11332 tmode = insn_data[icode].operand[0].mode;
11333 mode0 = insn_data[icode].operand[1].mode;
11334 mode1 = insn_data[icode].operand[2].mode;
11335 mode2 = insn_data[icode].operand[3].mode;
11337 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11338 op0 = copy_to_mode_reg (mode0, op0);
11339 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11340 op1 = copy_to_mode_reg (mode1, op1);
11341 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11342 op2 = copy_to_mode_reg (mode2, op2);
11343 if (target == 0
11344 || GET_MODE (target) != tmode
11345 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11346 target = gen_reg_rtx (tmode);
11347 pat = GEN_FCN (icode) (target, op0, op1, op2);
11348 if (! pat)
11349 return 0;
11350 emit_insn (pat);
11351 return target;
11353 case ARM_BUILTIN_WZERO:
11354 target = gen_reg_rtx (DImode);
11355 emit_insn (gen_iwmmxt_clrdi (target));
11356 return target;
11358 default:
11359 break;
11362 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11363 if (d->code == (const enum arm_builtins) fcode)
11364 return arm_expand_binop_builtin (d->icode, arglist, target);
11366 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11367 if (d->code == (const enum arm_builtins) fcode)
11368 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
11370 /* @@@ Should really do something sensible here. */
11371 return NULL_RTX;
11374 /* Recursively search through all of the blocks in a function
11375 checking to see if any of the variables created in that
11376 function match the RTX called 'orig'. If they do then
11377 replace them with the RTX called 'new'. */
11378 static void
11379 replace_symbols_in_block (tree block, rtx orig, rtx new)
11381 for (; block; block = BLOCK_CHAIN (block))
11383 tree sym;
11385 if (!TREE_USED (block))
11386 continue;
11388 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
11390 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
11391 || DECL_IGNORED_P (sym)
11392 || TREE_CODE (sym) != VAR_DECL
11393 || DECL_EXTERNAL (sym)
11394 || !rtx_equal_p (DECL_RTL (sym), orig)
11396 continue;
11398 SET_DECL_RTL (sym, new);
11401 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
11405 /* Return the number (counting from 0) of
11406 the least significant set bit in MASK. */
11408 inline static int
11409 number_of_first_bit_set (int mask)
11411 int bit;
11413 for (bit = 0;
11414 (mask & (1 << bit)) == 0;
11415 ++bit)
11416 continue;
11418 return bit;
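/* For example, a MASK of 0x18 yields 3.  Note that MASK must have at
   least one bit set; a zero mask would send the loop above off the end
   of the word.  */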
11421 /* Generate code to return from a thumb function.
11422 If 'reg_containing_return_addr' is -1, then the return address is
11423 actually on the stack, at the stack pointer. */
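/* In the simplest cases this whole routine reduces to a single
   instruction: a "bx" through the register still holding the return
   address when nothing needs popping, or "pop {pc}" when the return
   address is on the stack and neither interworking nor a backtrace
   structure is in use -- see the two early-exit paths below.  */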
11424 static void
11425 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
11427 unsigned regs_available_for_popping;
11428 unsigned regs_to_pop;
11429 int pops_needed;
11430 unsigned available;
11431 unsigned required;
11432 int mode;
11433 int size;
11434 int restore_a4 = FALSE;
11436 /* Compute the registers we need to pop. */
11437 regs_to_pop = 0;
11438 pops_needed = 0;
11440 /* There is an assumption here, that if eh_ofs is not NULL, the
11441 normal return address will have been pushed. */
11442 if (reg_containing_return_addr == -1 || eh_ofs)
11444 /* When we are generating a return for __builtin_eh_return,
11445 reg_containing_return_addr must specify the return regno. */
11446 if (eh_ofs && reg_containing_return_addr == -1)
11447 abort ();
11449 regs_to_pop |= 1 << LR_REGNUM;
11450 ++pops_needed;
11453 if (TARGET_BACKTRACE)
11455 /* Restore the (ARM) frame pointer and stack pointer. */
11456 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
11457 pops_needed += 2;
11460 /* If there is nothing to pop then just emit the BX instruction and
11461 return. */
11462 if (pops_needed == 0)
11464 if (eh_ofs)
11465 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11467 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11468 return;
11470 /* Otherwise, if we are not supporting interworking, have not created
11471 a backtrace structure, and the function was not entered in ARM mode,
11472 then just pop the return address straight into the PC.  */
11473 else if (!TARGET_INTERWORK
11474 && !TARGET_BACKTRACE
11475 && !is_called_in_ARM_mode (current_function_decl))
11477 if (eh_ofs)
11479 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
11480 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11481 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11483 else
11484 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
11486 return;
11489 /* Find out how many of the (return) argument registers we can corrupt. */
11490 regs_available_for_popping = 0;
11492 /* If returning via __builtin_eh_return, the bottom three registers
11493 all contain information needed for the return. */
11494 if (eh_ofs)
11495 size = 12;
11496 else
11498 #ifdef RTX_CODE
11499 /* We can deduce the registers used from the function's
11500 return value.  This is more reliable than examining
11501 regs_ever_live[] because that will be set if the register is
11502 ever used in the function, not just if the register is used
11503 to hold a return value. */
11505 if (current_function_return_rtx != 0)
11506 mode = GET_MODE (current_function_return_rtx);
11507 else
11508 #endif
11509 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11511 size = GET_MODE_SIZE (mode);
11513 if (size == 0)
11515 /* In a void function we can use any argument register.
11516 In a function that returns a structure on the stack
11517 we can use the second and third argument registers. */
11518 if (mode == VOIDmode)
11519 regs_available_for_popping =
11520 (1 << ARG_REGISTER (1))
11521 | (1 << ARG_REGISTER (2))
11522 | (1 << ARG_REGISTER (3));
11523 else
11524 regs_available_for_popping =
11525 (1 << ARG_REGISTER (2))
11526 | (1 << ARG_REGISTER (3));
11528 else if (size <= 4)
11529 regs_available_for_popping =
11530 (1 << ARG_REGISTER (2))
11531 | (1 << ARG_REGISTER (3));
11532 else if (size <= 8)
11533 regs_available_for_popping =
11534 (1 << ARG_REGISTER (3));
11537 /* Match registers to be popped with registers into which we pop them. */
11538 for (available = regs_available_for_popping,
11539 required = regs_to_pop;
11540 required != 0 && available != 0;
11541 available &= ~(available & - available),
11542 required &= ~(required & - required))
11543 -- pops_needed;
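/* Illustrative aside (not from the original sources): M & -M isolates
   the lowest set bit of M in two's-complement arithmetic, so the step
   m &= ~(m & -m)  clears exactly one bit per iteration.  The loop
   above therefore cancels one required pop against one available
   register at a time; e.g. with available == 0b0110 the successive
   values are 0b0100 and then 0.  */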
11545 /* If we have any popping registers left over, remove them. */
11546 if (available > 0)
11547 regs_available_for_popping &= ~available;
11549 /* Otherwise if we need another popping register we can use
11550 the fourth argument register. */
11551 else if (pops_needed)
11553 /* If we have not found any free argument registers and
11554 reg a4 contains the return address, we must move it. */
11555 if (regs_available_for_popping == 0
11556 && reg_containing_return_addr == LAST_ARG_REGNUM)
11558 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11559 reg_containing_return_addr = LR_REGNUM;
11561 else if (size > 12)
11563 /* Register a4 is being used to hold part of the return value,
11564 but we have dire need of a free, low register. */
11565 restore_a4 = TRUE;
11567 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
11570 if (reg_containing_return_addr != LAST_ARG_REGNUM)
11572 /* The fourth argument register is available. */
11573 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
11575 --pops_needed;
11579 /* Pop as many registers as we can. */
11580 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11581 regs_available_for_popping);
11583 /* Process the registers we popped. */
11584 if (reg_containing_return_addr == -1)
11586 /* The return address was popped into the lowest numbered register. */
11587 regs_to_pop &= ~(1 << LR_REGNUM);
11589 reg_containing_return_addr =
11590 number_of_first_bit_set (regs_available_for_popping);
11592 /* Remove this register from the mask of available registers, so that
11593 the return address will not be corrupted by further pops. */
11594 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
11597 /* If we popped other registers then handle them here. */
11598 if (regs_available_for_popping)
11600 int frame_pointer;
11602 /* Work out which register currently contains the frame pointer. */
11603 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
11605 /* Move it into the correct place. */
11606 asm_fprintf (f, "\tmov\t%r, %r\n",
11607 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
11609 /* (Temporarily) remove it from the mask of popped registers. */
11610 regs_available_for_popping &= ~(1 << frame_pointer);
11611 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
11613 if (regs_available_for_popping)
11615 int stack_pointer;
11617 /* We popped the stack pointer as well;
11618 find the register that contains it. */
11619 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
11621 /* Move it into the stack register. */
11622 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
11624 /* At this point we have popped all necessary registers, so
11625 do not worry about restoring regs_available_for_popping
11626 to its correct value:
11628 assert (pops_needed == 0)
11629 assert (regs_available_for_popping == (1 << frame_pointer))
11630 assert (regs_to_pop == (1 << STACK_POINTER)) */
11632 else
11634 /* Since we have just moved the popped value into the frame
11635 pointer, the popping register is available for reuse, and
11636 we know that we still have the stack pointer left to pop. */
11637 regs_available_for_popping |= (1 << frame_pointer);
11641 /* If we still have registers left on the stack, but we no longer have
11642 any registers into which we can pop them, then we must move the return
11643 address into the link register and make available the register that
11644 contained it. */
11645 if (regs_available_for_popping == 0 && pops_needed > 0)
11647 regs_available_for_popping |= 1 << reg_containing_return_addr;
11649 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
11650 reg_containing_return_addr);
11652 reg_containing_return_addr = LR_REGNUM;
11655 /* If we have registers left on the stack then pop some more.
11656 We know that at most we will want to pop FP and SP. */
11657 if (pops_needed > 0)
11659 int popped_into;
11660 int move_to;
11662 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11663 regs_available_for_popping);
11665 /* We have popped either FP or SP.
11666 Move whichever one it is into the correct register. */
11667 popped_into = number_of_first_bit_set (regs_available_for_popping);
11668 move_to = number_of_first_bit_set (regs_to_pop);
11670 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
11672 regs_to_pop &= ~(1 << move_to);
11674 --pops_needed;
11677 /* If we still have not popped everything then we must have only
11678 had one register available to us and we are now popping the SP. */
11679 if (pops_needed > 0)
11681 int popped_into;
11683 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11684 regs_available_for_popping);
11686 popped_into = number_of_first_bit_set (regs_available_for_popping);
11688 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
11689 /*
11690 assert (regs_to_pop == (1 << STACK_POINTER))
11691 assert (pops_needed == 1)
11692 */
11695 /* If necessary restore the a4 register. */
11696 if (restore_a4)
11698 if (reg_containing_return_addr != LR_REGNUM)
11700 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11701 reg_containing_return_addr = LR_REGNUM;
11704 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
11707 if (eh_ofs)
11708 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11710 /* Return to caller. */
11711 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11714 /* Emit code to push or pop registers to or from the stack. F is the
11715 assembly file. MASK is the registers to push or pop. PUSH is
11716 non-zero if we should push, and zero if we should pop. For debugging
11717 output, if pushing, adjust CFA_OFFSET by the amount of space added
11718 to the stack. REAL_REGS should have the same number of bits set as
11719 MASK, and will be used instead (in the same order) to describe which
11720 registers were saved - this is used to mark the save slots when we
11721 push high registers after moving them to low registers. */
11722 static void
11723 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
11725 int regno;
11726 int lo_mask = mask & 0xFF;
11727 int pushed_words = 0;
11729 if (lo_mask == 0 && !push && (mask & (1 << 15)))
11731 /* Special case: do not generate a POP PC statement here; do it in
11732 thumb_exit(). */
11733 thumb_exit (f, -1, NULL_RTX);
11734 return;
11737 fprintf (f, "\t%s\t{", push ? "push" : "pop");
11739 /* Look at the low registers first. */
11740 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
11742 if (lo_mask & 1)
11744 asm_fprintf (f, "%r", regno);
11746 if ((lo_mask & ~1) != 0)
11747 fprintf (f, ", ");
11749 pushed_words++;
11753 if (push && (mask & (1 << LR_REGNUM)))
11755 /* Catch pushing the LR. */
11756 if (mask & 0xFF)
11757 fprintf (f, ", ");
11759 asm_fprintf (f, "%r", LR_REGNUM);
11761 pushed_words++;
11763 else if (!push && (mask & (1 << PC_REGNUM)))
11765 /* Catch popping the PC. */
11766 if (TARGET_INTERWORK || TARGET_BACKTRACE)
11768 /* The PC is never popped directly; instead
11769 it is popped into r3 and then BX is used. */
11770 fprintf (f, "}\n");
11772 thumb_exit (f, -1, NULL_RTX);
11774 return;
11776 else
11778 if (mask & 0xFF)
11779 fprintf (f, ", ");
11781 asm_fprintf (f, "%r", PC_REGNUM);
11785 fprintf (f, "}\n");
11787 if (push && pushed_words && dwarf2out_do_frame ())
11789 char *l = dwarf2out_cfi_label ();
11790 int pushed_mask = real_regs;
11792 *cfa_offset += pushed_words * 4;
11793 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
11795 pushed_words = 0;
11796 pushed_mask = real_regs;
11797 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
11799 if (pushed_mask & 1)
11800 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
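/* Illustrative example (hypothetical values): with MASK covering
   {r4, r5, lr} and *CFA_OFFSET initially 0, the code above sets the
   CFA to SP + 12 and records save slots at offsets -12 (r4), -8 (r5)
   and -4 (lr) relative to that CFA.  */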
11805 void
11806 thumb_final_prescan_insn (rtx insn)
11808 if (flag_print_asm_name)
11809 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
11810 INSN_ADDRESSES (INSN_UID (insn)));
11813 int
11814 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
11816 unsigned HOST_WIDE_INT mask = 0xff;
11817 int i;
11819 if (val == 0) /* XXX */
11820 return 0;
11822 for (i = 0; i < 25; i++)
11823 if ((val & (mask << i)) == val)
11824 return 1;
11826 return 0;
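/* Illustrative sketch (not from the sources): an equivalent way to
   state what thumb_shiftable_const accepts -- all set bits must fit
   in one 8-bit window, and that window must lie within bits 0..31
   (the shift counts 0..24 probed by the loop above).  */
static int
example_fits_shifted_byte (unsigned long long val)
{
  int b, lo = -1, hi = -1;

  if (val == 0)
    return 0;

  for (b = 0; b < 64; b++)
    if (val & (1ULL << b))
      {
	if (lo < 0)
	  lo = b;		/* lowest set bit */
	hi = b;			/* highest set bit */
      }

  return hi - lo < 8 && hi <= 31;
}
/* e.g. 0x12000 (0x12 << 12) is accepted; 0x101 spans nine bit
   positions and is rejected, as is anything with bits above 31.  */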
11829 /* Returns nonzero if the current function contains,
11830 or might contain, a far jump. */
11831 int
11832 thumb_far_jump_used_p (int in_prologue)
11834 rtx insn;
11836 /* This test is only important for leaf functions. */
11837 /* assert (!leaf_function_p ()); */
11839 /* If we have already decided that far jumps may be used,
11840 do not bother checking again, and always return true even if
11841 it turns out that they are not being used. Once we have made
11842 the decision that far jumps are present (and that hence the link
11843 register will be pushed onto the stack) we cannot go back on it. */
11844 if (cfun->machine->far_jump_used)
11845 return 1;
11847 /* If this function is not being called from the prologue/epilogue
11848 generation code then it must be being called from the
11849 INITIAL_ELIMINATION_OFFSET macro. */
11850 if (!in_prologue)
11852 /* In this case we know that we are being asked about the elimination
11853 of the arg pointer register. If that register is not being used,
11854 then there are no arguments on the stack, and we do not have to
11855 worry that a far jump might force the prologue to push the link
11856 register, changing the stack offsets. In this case we can just
11857 return false, since the presence of far jumps in the function will
11858 not affect stack offsets.
11860 If the arg pointer is live (or if it was live, but has now been
11861 eliminated and so set to dead) then we do have to test to see if
11862 the function might contain a far jump. This test can lead to some
11863 false positives, since before reload is completed the length of
11864 branch instructions is not known, so gcc defaults to returning their
11865 longest length, which in turn sets the far jump attribute to true.
11867 A false positive will not result in bad code being generated, but it
11868 will result in a needless push and pop of the link register. We
11869 hope that this does not occur too often. */
11870 if (regs_ever_live [ARG_POINTER_REGNUM])
11871 cfun->machine->arg_pointer_live = 1;
11872 else if (!cfun->machine->arg_pointer_live)
11873 return 0;
11876 /* Check to see if the function contains a branch
11877 insn with the far jump attribute set. */
11878 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11880 if (GET_CODE (insn) == JUMP_INSN
11881 /* Ignore tablejump patterns. */
11882 && GET_CODE (PATTERN (insn)) != ADDR_VEC
11883 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
11884 && get_attr_far_jump (insn) == FAR_JUMP_YES
11887 /* Record the fact that we have decided that
11888 the function does use far jumps. */
11889 cfun->machine->far_jump_used = 1;
11890 return 1;
11894 return 0;
11897 /* Return nonzero if FUNC must be entered in ARM mode. */
11898 int
11899 is_called_in_ARM_mode (tree func)
11901 if (TREE_CODE (func) != FUNCTION_DECL)
11902 abort ();
11904 /* Ignore the problem about functions whose address is taken. */
11905 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
11906 return TRUE;
11908 #ifdef ARM_PE
11909 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
11910 #else
11911 return FALSE;
11912 #endif
11915 /* The bits which aren't usefully expanded as rtl. */
11916 const char *
11917 thumb_unexpanded_epilogue (void)
11919 int regno;
11920 int live_regs_mask = 0;
11921 int high_regs_pushed = 0;
11922 int leaf_function = leaf_function_p ();
11923 int had_to_push_lr;
11924 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
11926 if (return_used_this_function)
11927 return "";
11929 if (IS_NAKED (arm_current_func_type ()))
11930 return "";
11932 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11933 if (THUMB_REG_PUSHED_P (regno))
11934 live_regs_mask |= 1 << regno;
11936 for (regno = 8; regno < 13; regno++)
11937 if (THUMB_REG_PUSHED_P (regno))
11938 high_regs_pushed++;
11940 /* The prologue may have pushed some high registers to use as
11941 work registers, e.g. the testsuite file:
11942 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
11943 compiles to produce:
11944 push {r4, r5, r6, r7, lr}
11945 mov r7, r9
11946 mov r6, r8
11947 push {r6, r7}
11948 as part of the prologue. We have to undo that pushing here. */
11950 if (high_regs_pushed)
11952 int mask = live_regs_mask;
11953 int next_hi_reg;
11954 int size;
11955 int mode;
11957 #ifdef RTX_CODE
11958 /* Try to deduce the registers used from the function's return value.
11959 This is more reliable than examining regs_ever_live[] because that
11960 will be set if the register is ever used in the function, not just if
11961 the register is used to hold a return value. */
11963 if (current_function_return_rtx != 0)
11964 mode = GET_MODE (current_function_return_rtx);
11965 else
11966 #endif
11967 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11969 size = GET_MODE_SIZE (mode);
11971 /* Unless we are returning a type of size > 12, register r3 is
11972 available. */
11973 if (size < 13)
11974 mask |= 1 << 3;
11976 if (mask == 0)
11977 /* Oh dear! We have no low registers into which we can pop
11978 high registers! */
11979 internal_error
11980 ("no low registers available for popping high registers");
11982 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
11983 if (THUMB_REG_PUSHED_P (next_hi_reg))
11984 break;
11986 while (high_regs_pushed)
11988 /* Find lo register(s) into which the high register(s) can
11989 be popped. */
11990 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11992 if (mask & (1 << regno))
11993 high_regs_pushed--;
11994 if (high_regs_pushed == 0)
11995 break;
11998 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12000 /* Pop the values into the low register(s). */
12001 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12003 /* Move the value(s) into the high registers. */
12004 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12006 if (mask & (1 << regno))
12008 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12009 regno);
12011 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12012 if (THUMB_REG_PUSHED_P (next_hi_reg))
12013 break;
12019 had_to_push_lr = (live_regs_mask || !leaf_function
12020 || thumb_far_jump_used_p (1));
12022 if (TARGET_BACKTRACE
12023 && ((live_regs_mask & 0xFF) == 0)
12024 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12026 /* The stack backtrace structure creation code had to
12027 push R7 in order to get a work register, so we pop
12028 it now. */
12029 live_regs_mask |= (1 << LAST_LO_REGNUM);
12032 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12034 if (had_to_push_lr
12035 && !is_called_in_ARM_mode (current_function_decl)
12036 && !eh_ofs)
12037 live_regs_mask |= 1 << PC_REGNUM;
12039 /* Either no argument registers were pushed or a backtrace
12040 structure was created which includes an adjusted stack
12041 pointer, so just pop everything. */
12042 if (live_regs_mask)
12043 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12044 live_regs_mask);
12046 if (eh_ofs)
12047 thumb_exit (asm_out_file, 2, eh_ofs);
12048 /* We have either just popped the return address into the
12049 PC, or it was kept in LR for the entire function, or
12050 it is still on the stack because we do not want to
12051 return by doing a pop {pc}. */
12052 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12053 thumb_exit (asm_out_file,
12054 (had_to_push_lr
12055 && is_called_in_ARM_mode (current_function_decl)) ?
12056 -1 : LR_REGNUM, NULL_RTX);
12058 else
12060 /* Pop everything but the return address. */
12061 live_regs_mask &= ~(1 << PC_REGNUM);
12063 if (live_regs_mask)
12064 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12065 live_regs_mask);
12067 if (had_to_push_lr)
12068 /* Get the return address into a temporary register. */
12069 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12070 1 << LAST_ARG_REGNUM);
12072 /* Remove the argument registers that were pushed onto the stack. */
12073 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12074 SP_REGNUM, SP_REGNUM,
12075 current_function_pretend_args_size);
12077 if (eh_ofs)
12078 thumb_exit (asm_out_file, 2, eh_ofs);
12079 else
12080 thumb_exit (asm_out_file,
12081 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12084 return "";
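/* Illustrative note (register choice hypothetical): the epilogue
   mirror of the prologue sequence quoted above would be, e.g.,
       pop  {r6, r7}
       mov  r8, r6
       mov  r9, r7
   assuming r8 and r9 were the pushed high registers and r6/r7 are the
   low registers selected by the mask -- pop into low registers, then
   move the values back up, as the high_regs_pushed loop emits.  */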
12087 /* Functions to save and restore machine-specific function data. */
12088 static struct machine_function *
12089 arm_init_machine_status (void)
12091 struct machine_function *machine;
12092 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12094 #if ARM_FT_UNKNOWN != 0
12095 machine->func_type = ARM_FT_UNKNOWN;
12096 #endif
12097 return machine;
12100 /* Return an RTX indicating where the return address to the
12101 calling function can be found. */
12102 rtx
12103 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12105 if (count != 0)
12106 return NULL_RTX;
12108 if (TARGET_APCS_32)
12109 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12110 else
12112 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12113 GEN_INT (RETURN_ADDR_MASK26));
12114 return get_func_hard_reg_initial_val (cfun, lr);
12118 /* Do anything needed before RTL is emitted for each function. */
12119 void
12120 arm_init_expanders (void)
12122 /* Arrange to initialize and mark the machine per-function status. */
12123 init_machine_status = arm_init_machine_status;
12126 HOST_WIDE_INT
12127 thumb_get_frame_size (void)
12129 int regno;
12131 int base_size = ROUND_UP_WORD (get_frame_size ());
12132 int count_regs = 0;
12133 int entry_size = 0;
12134 int leaf;
12136 if (! TARGET_THUMB)
12137 abort ();
12139 if (! TARGET_ATPCS)
12140 return base_size;
12142 /* We need to know if we are a leaf function. Unfortunately, it
12143 is possible to be called after start_sequence has been called,
12144 which causes get_insns to return the insns for the sequence,
12145 not the function, which will cause leaf_function_p to return
12146 the incorrect result.
12148 To work around this, we cache the computed frame size. This
12149 works because we will only be calling RTL expanders that need
12150 to know about leaf functions once reload has completed, and the
12151 frame size cannot be changed after that time, so we can safely
12152 use the cached value. */
12154 if (reload_completed)
12155 return cfun->machine->frame_size;
12157 leaf = leaf_function_p ();
12159 /* A leaf function does not need any stack alignment if it has nothing
12160 on the stack. */
12161 if (leaf && base_size == 0)
12163 cfun->machine->frame_size = 0;
12164 return 0;
12167 /* We know that SP will be word aligned on entry, and we must
12168 preserve that condition at any subroutine call. But those are
12169 the only constraints. */
12171 /* Space for variadic functions. */
12172 if (current_function_pretend_args_size)
12173 entry_size += current_function_pretend_args_size;
12175 /* Space for pushed lo registers. */
12176 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12177 if (THUMB_REG_PUSHED_P (regno))
12178 count_regs++;
12180 /* Space for backtrace structure. */
12181 if (TARGET_BACKTRACE)
12183 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12184 entry_size += 20;
12185 else
12186 entry_size += 16;
12189 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12190 count_regs++; /* LR */
12192 entry_size += count_regs * 4;
12193 count_regs = 0;
12195 /* Space for pushed hi regs. */
12196 for (regno = 8; regno < 13; regno++)
12197 if (THUMB_REG_PUSHED_P (regno))
12198 count_regs++;
12200 entry_size += count_regs * 4;
12202 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12203 base_size += 4;
12204 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12205 abort ();
12207 cfun->machine->frame_size = base_size;
12209 return base_size;
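/* Illustrative sketch (not from the sources): the ATPCS requires the
   stack to stay 8-byte aligned at call boundaries.  Every term above
   is already word-aligned, so the total is either 0 or 4 modulo 8,
   and one pad word fixes the latter, exactly as the code above does.  */
static int
example_atpcs_pad (int entry_size, int base_size, int outgoing_args)
{
  if ((entry_size + base_size + outgoing_args) & 7)
    base_size += 4;		/* total was 4 mod 8; pad one word */
  return base_size;
}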
12212 /* Generate the rest of a function's prologue. */
12213 void
12214 thumb_expand_prologue (void)
12216 rtx insn, dwarf;
12218 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12219 + current_function_outgoing_args_size);
12220 unsigned long func_type;
12222 func_type = arm_current_func_type ();
12224 /* Naked functions don't have prologues. */
12225 if (IS_NAKED (func_type))
12226 return;
12228 if (IS_INTERRUPT (func_type))
12230 error ("interrupt Service Routines cannot be coded in Thumb mode");
12231 return;
12234 if (frame_pointer_needed)
12236 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
12237 RTX_FRAME_RELATED_P (insn) = 1;
12240 if (amount)
12242 amount = ROUND_UP_WORD (amount);
12244 if (amount < 512)
12246 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12247 GEN_INT (- amount)));
12248 RTX_FRAME_RELATED_P (insn) = 1;
12250 else
12252 int regno;
12253 rtx reg;
12255 /* The stack decrement is too big for an immediate value in a single
12256 insn. In theory we could issue multiple subtracts, but after
12257 three of them it becomes more space efficient to place the full
12258 value in the constant pool and load into a register. (Also the
12259 ARM debugger really likes to see only one stack decrement per
12260 function). So instead we look for a scratch register into which
12261 we can load the decrement, and then we subtract this from the
12262 stack pointer. Unfortunately on the thumb the only available
12263 scratch registers are the argument registers, and we cannot use
12264 these as they may hold arguments to the function. Instead we
12265 attempt to locate a call preserved register which is used by this
12266 function. If we can find one, then we know that it will have
12267 been pushed at the start of the prologue and so we can corrupt
12268 it now. */
12269 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
12270 if (THUMB_REG_PUSHED_P (regno)
12271 && !(frame_pointer_needed
12272 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
12273 break;
12275 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
12277 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
12279 /* Choose an arbitrary, non-argument low register. */
12280 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
12282 /* Save it by copying it into a high, scratch register. */
12283 emit_insn (gen_movsi (spare, reg));
12284 /* Add a USE to stop propagate_one_insn() from barfing. */
12285 emit_insn (gen_prologue_use (spare));
12287 /* Decrement the stack. */
12288 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12289 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12290 stack_pointer_rtx, reg));
12291 RTX_FRAME_RELATED_P (insn) = 1;
12292 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12293 plus_constant (stack_pointer_rtx,
12294 GEN_INT (- amount)));
12295 RTX_FRAME_RELATED_P (dwarf) = 1;
12296 REG_NOTES (insn)
12297 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12298 REG_NOTES (insn));
12300 /* Restore the low register's original value. */
12301 emit_insn (gen_movsi (reg, spare));
12303 /* Emit a USE of the restored scratch register, so that flow
12304 analysis will not consider the restore redundant. The
12305 register won't be used again in this function and isn't
12306 restored by the epilogue. */
12307 emit_insn (gen_prologue_use (reg));
12309 else
12311 reg = gen_rtx (REG, SImode, regno);
12313 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12315 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12316 stack_pointer_rtx, reg));
12317 RTX_FRAME_RELATED_P (insn) = 1;
12318 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12319 plus_constant (stack_pointer_rtx,
12320 GEN_INT (- amount)));
12321 RTX_FRAME_RELATED_P (dwarf) = 1;
12322 REG_NOTES (insn)
12323 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12324 REG_NOTES (insn));
12329 if (current_function_profile || TARGET_NO_SCHED_PRO)
12330 emit_insn (gen_blockage ());
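/* Illustrative note (hypothetical register choice): for a frame larger
   than 511 bytes the code above emits, in effect,
       ldr  r4, .Lc		@ -frame_size, from the literal pool
       add  sp, sp, r4
   using a pushed call-saved low register such as r4; in the "very
   unlikely" case that none is free,
       mov  ip, r7		@ save r7 in a high scratch
       ldr  r7, .Lc
       add  sp, sp, r7
       mov  r7, ip		@ restore r7
   with prologue USEs so flow analysis keeps the moves alive.  */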
12333 void
12334 thumb_expand_epilogue (void)
12336 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12337 + current_function_outgoing_args_size);
12338 int regno;
12340 /* Naked functions don't have epilogues. */
12341 if (IS_NAKED (arm_current_func_type ()))
12342 return;
12344 if (frame_pointer_needed)
12345 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
12346 else if (amount)
12348 amount = ROUND_UP_WORD (amount);
12350 if (amount < 512)
12351 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12352 GEN_INT (amount)));
12353 else
12355 /* r3 is always free in the epilogue. */
12356 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
12358 emit_insn (gen_movsi (reg, GEN_INT (amount)));
12359 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
12363 /* Emit a USE (stack_pointer_rtx), so that
12364 the stack adjustment will not be deleted. */
12365 emit_insn (gen_prologue_use (stack_pointer_rtx));
12367 if (current_function_profile || TARGET_NO_SCHED_PRO)
12368 emit_insn (gen_blockage ());
12370 /* Emit a clobber for each insn that will be restored in the epilogue,
12371 so that flow2 will get register lifetimes correct. */
12372 for (regno = 0; regno < 13; regno++)
12373 if (regs_ever_live[regno] && !call_used_regs[regno])
12374 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
12376 if (! regs_ever_live[LR_REGNUM])
12377 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
12380 static void
12381 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
12383 int live_regs_mask = 0;
12384 int high_regs_pushed = 0;
12385 int cfa_offset = 0;
12386 int regno;
12388 if (IS_NAKED (arm_current_func_type ()))
12389 return;
12391 if (is_called_in_ARM_mode (current_function_decl))
12393 const char * name;
12395 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
12396 abort ();
12397 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
12398 abort ();
12399 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
12401 /* Generate code sequence to switch us into Thumb mode. */
12402 /* The .code 32 directive has already been emitted by
12403 ASM_DECLARE_FUNCTION_NAME. */
12404 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
12405 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
12407 /* Generate a label, so that the debugger will notice the
12408 change in instruction sets. This label is also used by
12409 the assembler to bypass the ARM code when this function
12410 is called from a Thumb encoded function elsewhere in the
12411 same file. Hence the definition of STUB_NAME here must
12412 agree with the definition in gas/config/tc-arm.c. */
12414 #define STUB_NAME ".real_start_of"
12416 fprintf (f, "\t.code\t16\n");
12417 #ifdef ARM_PE
12418 if (arm_dllexport_name_p (name))
12419 name = arm_strip_name_encoding (name);
12420 #endif
12421 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
12422 fprintf (f, "\t.thumb_func\n");
12423 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
12426 if (current_function_pretend_args_size)
12428 if (cfun->machine->uses_anonymous_args)
12430 int num_pushes;
12432 fprintf (f, "\tpush\t{");
12434 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
12436 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
12437 regno <= LAST_ARG_REGNUM;
12438 regno++)
12439 asm_fprintf (f, "%r%s", regno,
12440 regno == LAST_ARG_REGNUM ? "" : ", ");
12442 fprintf (f, "}\n");
12444 else
12445 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
12446 SP_REGNUM, SP_REGNUM,
12447 current_function_pretend_args_size);
12449 /* We don't need to record the stores for unwinding (would it
12450 help the debugger any if we did?), but record the change in
12451 the stack pointer. */
12452 if (dwarf2out_do_frame ())
12454 char *l = dwarf2out_cfi_label ();
12455 cfa_offset = cfa_offset + current_function_pretend_args_size;
12456 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12460 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12461 if (THUMB_REG_PUSHED_P (regno))
12462 live_regs_mask |= 1 << regno;
12464 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
12465 live_regs_mask |= 1 << LR_REGNUM;
12467 if (TARGET_BACKTRACE)
12469 int offset;
12470 int work_register = 0;
12471 int wr;
12473 /* We have been asked to create a stack backtrace structure.
12474 The code looks like this:
12476 0 .align 2
12477 0 func:
12478 0 sub SP, #16 Reserve space for 4 registers.
12479 2 push {R7} Get a work register.
12480 4 add R7, SP, #20 Get the stack pointer before the push.
12481 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
12482 8 mov R7, PC Get hold of the start of this code plus 12.
12483 10 str R7, [SP, #16] Store it.
12484 12 mov R7, FP Get hold of the current frame pointer.
12485 14 str R7, [SP, #4] Store it.
12486 16 mov R7, LR Get hold of the current return address.
12487 18 str R7, [SP, #12] Store it.
12488 20 add R7, SP, #16 Point at the start of the backtrace structure.
12489 22 mov FP, R7 Put this value into the frame pointer. */
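/* Illustrative sketch (field names invented for exposition): relative
   to the final frame pointer, the 16-byte record built by the sequence
   above is

     struct example_thumb_backtrace
     {
       unsigned int saved_fp;	 at FP-12: caller's frame pointer
       unsigned int saved_sp;	 at FP-8:  SP before the reserve
       unsigned int saved_lr;	 at FP-4:  return address
       unsigned int code_addr;	 at FP:    PC near the function start
     };

   and FP is left pointing at the CODE_ADDR slot.  */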
12491 if ((live_regs_mask & 0xFF) == 0)
12493 /* See if the a4 register is free. */
12495 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
12496 work_register = LAST_ARG_REGNUM;
12497 else /* We must push a register of our own. */
12498 live_regs_mask |= (1 << LAST_LO_REGNUM);
12501 if (work_register == 0)
12503 /* Select a register from the list that will be pushed to
12504 use as our work register. */
12505 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
12506 if ((1 << work_register) & live_regs_mask)
12507 break;
12510 asm_fprintf
12511 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
12512 SP_REGNUM, SP_REGNUM);
12514 if (dwarf2out_do_frame ())
12516 char *l = dwarf2out_cfi_label ();
12517 cfa_offset = cfa_offset + 16;
12518 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12521 if (live_regs_mask)
12522 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12524 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
12525 if (wr & live_regs_mask)
12526 offset += 4;
12528 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12529 offset + 16 + current_function_pretend_args_size);
12531 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12532 offset + 4);
12534 /* Make sure that the instruction fetching the PC is in the right place
12535 to calculate "start of backtrace creation code + 12". */
12536 if (live_regs_mask)
12538 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12539 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12540 offset + 12);
12541 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12542 ARM_HARD_FRAME_POINTER_REGNUM);
12543 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12544 offset);
12546 else
12548 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12549 ARM_HARD_FRAME_POINTER_REGNUM);
12550 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12551 offset);
12552 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12553 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12554 offset + 12);
12557 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
12558 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12559 offset + 8);
12560 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12561 offset + 12);
12562 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
12563 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
12565 else if (live_regs_mask)
12566 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12568 for (regno = 8; regno < 13; regno++)
12569 if (THUMB_REG_PUSHED_P (regno))
12570 high_regs_pushed++;
12572 if (high_regs_pushed)
12574 int pushable_regs = 0;
12575 int mask = live_regs_mask & 0xff;
12576 int next_hi_reg;
12578 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
12579 if (THUMB_REG_PUSHED_P (next_hi_reg))
12580 break;
12582 pushable_regs = mask;
12584 if (pushable_regs == 0)
12586 /* Desperation time -- this probably will never happen. */
12587 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
12588 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12589 mask = 1 << LAST_ARG_REGNUM;
12592 while (high_regs_pushed > 0)
12594 int real_regs_mask = 0;
12596 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
12598 if (mask & (1 << regno))
12600 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
12602 high_regs_pushed--;
12603 real_regs_mask |= (1 << next_hi_reg);
12605 if (high_regs_pushed)
12607 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
12608 next_hi_reg--)
12609 if (THUMB_REG_PUSHED_P (next_hi_reg))
12610 break;
12612 else
12614 mask &= ~((1 << regno) - 1);
12615 break;
12620 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
12623 if (pushable_regs == 0
12624 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
12625 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12629 /* Handle the case of a double word load into a low register from
12630 a computed memory address. The computed address may involve a
12631 register which is overwritten by the load. */
12632 const char *
12633 thumb_load_double_from_address (rtx *operands)
12635 rtx addr;
12636 rtx base;
12637 rtx offset;
12638 rtx arg1;
12639 rtx arg2;
12641 if (GET_CODE (operands[0]) != REG)
12642 abort ();
12644 if (GET_CODE (operands[1]) != MEM)
12645 abort ();
12647 /* Get the memory address. */
12648 addr = XEXP (operands[1], 0);
12650 /* Work out how the memory address is computed. */
12651 switch (GET_CODE (addr))
12653 case REG:
12654 operands[2] = gen_rtx (MEM, SImode,
12655 plus_constant (XEXP (operands[1], 0), 4));
12657 if (REGNO (operands[0]) == REGNO (addr))
12659 output_asm_insn ("ldr\t%H0, %2", operands);
12660 output_asm_insn ("ldr\t%0, %1", operands);
12662 else
12664 output_asm_insn ("ldr\t%0, %1", operands);
12665 output_asm_insn ("ldr\t%H0, %2", operands);
12667 break;
12669 case CONST:
12670 /* Compute <address> + 4 for the high order load. */
12671 operands[2] = gen_rtx (MEM, SImode,
12672 plus_constant (XEXP (operands[1], 0), 4));
12674 output_asm_insn ("ldr\t%0, %1", operands);
12675 output_asm_insn ("ldr\t%H0, %2", operands);
12676 break;
12678 case PLUS:
12679 arg1 = XEXP (addr, 0);
12680 arg2 = XEXP (addr, 1);
12682 if (CONSTANT_P (arg1))
12683 base = arg2, offset = arg1;
12684 else
12685 base = arg1, offset = arg2;
12687 if (GET_CODE (base) != REG)
12688 abort ();
12690 /* Catch the case of <address> = <reg> + <reg> */
12691 if (GET_CODE (offset) == REG)
12693 int reg_offset = REGNO (offset);
12694 int reg_base = REGNO (base);
12695 int reg_dest = REGNO (operands[0]);
12697 /* Add the base and offset registers together into the
12698 higher destination register. */
12699 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
12700 reg_dest + 1, reg_base, reg_offset);
12702 /* Load the lower destination register from the address in
12703 the higher destination register. */
12704 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
12705 reg_dest, reg_dest + 1);
12707 /* Load the higher destination register from its own address
12708 plus 4. */
12709 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
12710 reg_dest + 1, reg_dest + 1);
12712 else
12714 /* Compute <address> + 4 for the high order load. */
12715 operands[2] = gen_rtx (MEM, SImode,
12716 plus_constant (XEXP (operands[1], 0), 4));
12718 /* If the computed address is held in the low order register
12719 then load the high order register first, otherwise always
12720 load the low order register first. */
12721 if (REGNO (operands[0]) == REGNO (base))
12723 output_asm_insn ("ldr\t%H0, %2", operands);
12724 output_asm_insn ("ldr\t%0, %1", operands);
12726 else
12728 output_asm_insn ("ldr\t%0, %1", operands);
12729 output_asm_insn ("ldr\t%H0, %2", operands);
12732 break;
12734 case LABEL_REF:
12735 /* With no registers to worry about we can just load the value
12736 directly. */
12737 operands[2] = gen_rtx (MEM, SImode,
12738 plus_constant (XEXP (operands[1], 0), 4));
12740 output_asm_insn ("ldr\t%H0, %2", operands);
12741 output_asm_insn ("ldr\t%0, %1", operands);
12742 break;
12744 default:
12745 abort ();
12746 break;
12749 return "";
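/* Illustrative note: the ordering rule above in miniature, for a
   doubleword loaded into r0/r1:
       base in r0:  ldr r1, [r0, #4]	@ high word first; r0 still live
                    ldr r0, [r0]
       base in r1:  ldr r0, [r1]	@ low word first is safe here
                    ldr r1, [r1, #4]	@ base consumed by its last use
   Either way the base register is overwritten only by its final use.  */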
12752 const char *
12753 thumb_output_move_mem_multiple (int n, rtx *operands)
12755 rtx tmp;
12757 switch (n)
12759 case 2:
12760 if (REGNO (operands[4]) > REGNO (operands[5]))
12762 tmp = operands[4];
12763 operands[4] = operands[5];
12764 operands[5] = tmp;
12766 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
12767 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
12768 break;
12770 case 3:
12771 if (REGNO (operands[4]) > REGNO (operands[5]))
12773 tmp = operands[4];
12774 operands[4] = operands[5];
12775 operands[5] = tmp;
12777 if (REGNO (operands[5]) > REGNO (operands[6]))
12779 tmp = operands[5];
12780 operands[5] = operands[6];
12781 operands[6] = tmp;
12783 if (REGNO (operands[4]) > REGNO (operands[5]))
12785 tmp = operands[4];
12786 operands[4] = operands[5];
12787 operands[5] = tmp;
12790 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
12791 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
12792 break;
12794 default:
12795 abort ();
12798 return "";
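/* Illustrative sketch (not from the sources): the conditional swaps in
   the N == 3 case above form the classic three-input sorting network,
   which leaves the register numbers ascending as LDMIA/STMIA require.  */
static void
example_sort3 (int *a, int *b, int *c)
{
  int t;

  if (*a > *b) { t = *a; *a = *b; *b = t; }	/* order a, b */
  if (*b > *c) { t = *b; *b = *c; *c = t; }	/* order b, c */
  if (*a > *b) { t = *a; *a = *b; *b = t; }	/* re-order a, b */
}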
12801 /* Routines for generating rtl. */
12802 void
12803 thumb_expand_movstrqi (rtx *operands)
12805 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
12806 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
12807 HOST_WIDE_INT len = INTVAL (operands[2]);
12808 HOST_WIDE_INT offset = 0;
12810 while (len >= 12)
12812 emit_insn (gen_movmem12b (out, in, out, in));
12813 len -= 12;
12816 if (len >= 8)
12818 emit_insn (gen_movmem8b (out, in, out, in));
12819 len -= 8;
12822 if (len >= 4)
12824 rtx reg = gen_reg_rtx (SImode);
12825 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
12826 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
12827 len -= 4;
12828 offset += 4;
12831 if (len >= 2)
12833 rtx reg = gen_reg_rtx (HImode);
12834 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
12835 plus_constant (in, offset))));
12836 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
12837 reg));
12838 len -= 2;
12839 offset += 2;
12842 if (len)
12844 rtx reg = gen_reg_rtx (QImode);
12845 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
12846 plus_constant (in, offset))));
12847 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
12848 reg));
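/* Illustrative sketch (not from the sources): the schedule used above,
   in isolation -- greedy 12-byte LDMIA/STMIA blocks, then at most one
   each of 8, 4, 2 and 1 bytes.  */
static int
example_copy_moves (int len)
{
  int moves = 0;

  while (len >= 12) { len -= 12; moves++; }	/* three-word blocks */
  if (len >= 8) { len -= 8; moves++; }		/* one two-word block */
  if (len >= 4) { len -= 4; moves++; }		/* one word */
  if (len >= 2) { len -= 2; moves++; }		/* one halfword */
  if (len) moves++;				/* final byte */

  return moves;
}
/* e.g. example_copy_moves (31) == 5: 12 + 12 + 4 + 2 + 1.  */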
12852 int
12853 thumb_cmp_operand (rtx op, enum machine_mode mode)
12855 return ((GET_CODE (op) == CONST_INT
12856 && INTVAL (op) < 256
12857 && INTVAL (op) >= 0)
12858 || s_register_operand (op, mode));
12861 int
12862 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
12864 return (GET_CODE (op) == CONST_INT
12865 && INTVAL (op) < 0
12866 && INTVAL (op) > -256);
12869 /* Return TRUE if a result can be stored in OP without clobbering the
12870 condition code register. Prior to reload we only accept a
12871 register. After reload we have to be able to handle memory as
12872 well, since a pseudo may not get a hard reg and reload cannot
12873 handle output-reloads on jump insns.
12875 We could possibly handle mem before reload as well, but that might
12876 complicate things with the need to handle increment
12877 side-effects. */
12879 int
12880 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
12882 return (s_register_operand (op, mode)
12883 || ((reload_in_progress || reload_completed)
12884 && memory_operand (op, mode)));
12887 /* Handle storing a half-word to memory during reload. */
12888 void
12889 thumb_reload_out_hi (rtx *operands)
12891 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
12894 /* Handle reading a half-word from memory during reload. */
12895 void
12896 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
12898 abort ();
12901 /* Return the length of a function name prefix
12902 that starts with the character 'c'. */
12903 static int
12904 arm_get_strip_length (int c)
12906 switch (c)
12908 ARM_NAME_ENCODING_LENGTHS
12909 default: return 0;
12913 /* Return a pointer to a function's name with any
12914 and all prefix encodings stripped from it. */
12915 const char *
12916 arm_strip_name_encoding (const char *name)
12918 int skip;
12920 while ((skip = arm_get_strip_length (* name)))
12921 name += skip;
12923 return name;
12926 /* If there is a '*' anywhere in the name's prefix, then
12927 emit the stripped name verbatim, otherwise prepend an
12928 underscore if leading underscores are being used. */
12929 void
12930 arm_asm_output_labelref (FILE *stream, const char *name)
12932 int skip;
12933 int verbatim = 0;
12935 while ((skip = arm_get_strip_length (* name)))
12937 verbatim |= (*name == '*');
12938 name += skip;
12941 if (verbatim)
12942 fputs (name, stream);
12943 else
12944 asm_fprintf (stream, "%U%s", name);
12947 rtx aof_pic_label;
12949 #ifdef AOF_ASSEMBLER
12950 /* Special functions only needed when producing AOF syntax assembler. */
12952 struct pic_chain
12954 struct pic_chain * next;
12955 const char * symname;
12958 static struct pic_chain * aof_pic_chain = NULL;
12960 rtx
12961 aof_pic_entry (rtx x)
12963 struct pic_chain ** chainp;
12964 int offset;
12966 if (aof_pic_label == NULL_RTX)
12968 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
12971 for (offset = 0, chainp = &aof_pic_chain; *chainp;
12972 offset += 4, chainp = &(*chainp)->next)
12973 if ((*chainp)->symname == XSTR (x, 0))
12974 return plus_constant (aof_pic_label, offset);
12976 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
12977 (*chainp)->next = NULL;
12978 (*chainp)->symname = XSTR (x, 0);
12979 return plus_constant (aof_pic_label, offset);
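/* Illustrative sketch (not from the sources): the list above is an
   interning table -- each distinct symbol gets the next 4-byte slot
   and repeated queries return the same offset.  Pointer comparison of
   the names suffices because GCC shares identical symbol strings.  */
struct example_entry
{
  struct example_entry *next;
  const char *name;
};

static int
example_intern_offset (struct example_entry **head, const char *name)
{
  int offset;
  struct example_entry **p;

  for (offset = 0, p = head; *p; offset += 4, p = &(*p)->next)
    if ((*p)->name == name)
      return offset;		/* already present */

  *p = (struct example_entry *) xmalloc (sizeof (struct example_entry));
  (*p)->next = NULL;
  (*p)->name = name;
  return offset;		/* first free slot */
}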
12982 void
12983 aof_dump_pic_table (FILE *f)
12985 struct pic_chain * chain;
12987 if (aof_pic_chain == NULL)
12988 return;
12990 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
12991 PIC_OFFSET_TABLE_REGNUM,
12992 PIC_OFFSET_TABLE_REGNUM);
12993 fputs ("|x$adcons|\n", f);
12995 for (chain = aof_pic_chain; chain; chain = chain->next)
12997 fputs ("\tDCD\t", f);
12998 assemble_name (f, chain->symname);
12999 fputs ("\n", f);
13003 int arm_text_section_count = 1;
13005 char *
13006 aof_text_section (void)
13008 static char buf[100];
13009 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13010 arm_text_section_count++);
13011 if (flag_pic)
13012 strcat (buf, ", PIC, REENTRANT");
13013 return buf;
13016 static int arm_data_section_count = 1;
13018 char *
13019 aof_data_section (void)
13021 static char buf[100];
13022 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13023 return buf;
13026 /* The AOF assembler is religiously strict about declarations of
13027 imported and exported symbols, so that it is impossible to declare
13028 a function as imported near the beginning of the file, and then to
13029 export it later on. It is, however, possible to delay the decision
13030 until all the functions in the file have been compiled. To get
13031 around this, we maintain a list of the imports and exports, and
13032 delete from it any that are subsequently defined. At the end of
13033 compilation we spit the remainder of the list out before the END
13034 directive. */
13036 struct import
13038 struct import * next;
13039 const char * name;
13042 static struct import * imports_list = NULL;
13044 void
13045 aof_add_import (const char *name)
13047 struct import * new;
13049 for (new = imports_list; new; new = new->next)
13050 if (new->name == name)
13051 return;
13053 new = (struct import *) xmalloc (sizeof (struct import));
13054 new->next = imports_list;
13055 imports_list = new;
13056 new->name = name;
13059 void
13060 aof_delete_import (const char *name)
13062 struct import ** old;
13064 for (old = &imports_list; *old; old = & (*old)->next)
13066 if ((*old)->name == name)
13068 *old = (*old)->next;
13069 return;
13074 int arm_main_function = 0;
13076 static void
13077 aof_dump_imports (FILE *f)
13079 /* The AOF assembler needs this to cause the startup code to be extracted
13080 from the library. Bringing in __main causes the whole thing to work
13081 automagically. */
13082 if (arm_main_function)
13084 text_section ();
13085 fputs ("\tIMPORT __main\n", f);
13086 fputs ("\tDCD __main\n", f);
13089 /* Now dump the remaining imports. */
13090 while (imports_list)
13092 fprintf (f, "\tIMPORT\t");
13093 assemble_name (f, imports_list->name);
13094 fputc ('\n', f);
13095 imports_list = imports_list->next;
13099 static void
13100 aof_globalize_label (FILE *stream, const char *name)
13102 default_globalize_label (stream, name);
13103 if (! strcmp (name, "main"))
13104 arm_main_function = 1;
13107 static void
13108 aof_file_start (void)
13110 fputs ("__r0\tRN\t0\n", asm_out_file);
13111 fputs ("__a1\tRN\t0\n", asm_out_file);
13112 fputs ("__a2\tRN\t1\n", asm_out_file);
13113 fputs ("__a3\tRN\t2\n", asm_out_file);
13114 fputs ("__a4\tRN\t3\n", asm_out_file);
13115 fputs ("__v1\tRN\t4\n", asm_out_file);
13116 fputs ("__v2\tRN\t5\n", asm_out_file);
13117 fputs ("__v3\tRN\t6\n", asm_out_file);
13118 fputs ("__v4\tRN\t7\n", asm_out_file);
13119 fputs ("__v5\tRN\t8\n", asm_out_file);
13120 fputs ("__v6\tRN\t9\n", asm_out_file);
13121 fputs ("__sl\tRN\t10\n", asm_out_file);
13122 fputs ("__fp\tRN\t11\n", asm_out_file);
13123 fputs ("__ip\tRN\t12\n", asm_out_file);
13124 fputs ("__sp\tRN\t13\n", asm_out_file);
13125 fputs ("__lr\tRN\t14\n", asm_out_file);
13126 fputs ("__pc\tRN\t15\n", asm_out_file);
13127 fputs ("__f0\tFN\t0\n", asm_out_file);
13128 fputs ("__f1\tFN\t1\n", asm_out_file);
13129 fputs ("__f2\tFN\t2\n", asm_out_file);
13130 fputs ("__f3\tFN\t3\n", asm_out_file);
13131 fputs ("__f4\tFN\t4\n", asm_out_file);
13132 fputs ("__f5\tFN\t5\n", asm_out_file);
13133 fputs ("__f6\tFN\t6\n", asm_out_file);
13134 fputs ("__f7\tFN\t7\n", asm_out_file);
13135 text_section ();
13138 static void
13139 aof_file_end (void)
13141 if (flag_pic)
13142 aof_dump_pic_table (asm_out_file);
13143 aof_dump_imports (asm_out_file);
13144 fputs ("\tEND\n", asm_out_file);
13146 #endif /* AOF_ASSEMBLER */
13148 #ifdef OBJECT_FORMAT_ELF
13149 /* Switch to an arbitrary section NAME with attributes as specified
13150 by FLAGS. ALIGN specifies any known alignment requirements for
13151 the section; 0 if the default should be used.
13153 Differs from the default elf version only in the prefix character
13154 used before the section type. */
13156 static void
13157 arm_elf_asm_named_section (const char *name, unsigned int flags)
13159 char flagchars[10], *f = flagchars;
13161 if (! named_section_first_declaration (name))
13163 fprintf (asm_out_file, "\t.section\t%s\n", name);
13164 return;
13167 if (!(flags & SECTION_DEBUG))
13168 *f++ = 'a';
13169 if (flags & SECTION_WRITE)
13170 *f++ = 'w';
13171 if (flags & SECTION_CODE)
13172 *f++ = 'x';
13173 if (flags & SECTION_SMALL)
13174 *f++ = 's';
13175 if (flags & SECTION_MERGE)
13176 *f++ = 'M';
13177 if (flags & SECTION_STRINGS)
13178 *f++ = 'S';
13179 if (flags & SECTION_TLS)
13180 *f++ = 'T';
13181 *f = '\0';
13183 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
13185 if (!(flags & SECTION_NOTYPE))
13187 const char *type;
13189 if (flags & SECTION_BSS)
13190 type = "nobits";
13191 else
13192 type = "progbits";
13194 fprintf (asm_out_file, ",%%%s", type);
13196 if (flags & SECTION_ENTSIZE)
13197 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
13200 putc ('\n', asm_out_file);
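/* Illustrative note: e.g. a plain writable data section comes out as
       .section .data.foo,"aw",%progbits
   the '%' before the type (where the default ELF hook writes '@')
   being the ARM-specific difference described above.  */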
13202 #endif
13204 #ifndef ARM_PE
13205 /* Symbols in the text segment can be accessed without indirecting via the
13206 constant pool; it may take an extra binary operation, but this is still
13207 faster than indirecting via memory. Don't do this when not optimizing,
13208 since we won't be calculating all of the offsets necessary to do this
13209 simplification. */
13211 static void
13212 arm_encode_section_info (tree decl, rtx rtl, int first)
13214 /* This doesn't work with AOF syntax, since the string table may be in
13215 a different AREA. */
13216 #ifndef AOF_ASSEMBLER
13217 if (optimize > 0 && TREE_CONSTANT (decl)
13218 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
13219 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13220 #endif
13222 /* If we are referencing a function that is weak then encode a long call
13223 flag in the function name, otherwise if the function is static or
13224 known to be defined in this file then encode a short call flag. */
13225 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
13227 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
13228 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
13229 else if (! TREE_PUBLIC (decl))
13230 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
13233 #endif /* !ARM_PE */
13235 static void
13236 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
13238 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
13239 && !strcmp (prefix, "L"))
13241 arm_ccfsm_state = 0;
13242 arm_target_insn = NULL;
13244 default_internal_label (stream, prefix, labelno);
13247 /* Output code to add DELTA to the first argument, and then jump
13248 to FUNCTION. Used for C++ multiple inheritance. */
13249 static void
13250 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13251 HOST_WIDE_INT delta,
13252 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
13253 tree function)
13255 int mi_delta = delta;
13256 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
13257 int shift = 0;
13258 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
13259 ? 1 : 0);
13260 if (mi_delta < 0)
13261 mi_delta = - mi_delta;
13262 while (mi_delta != 0)
13264 if ((mi_delta & (3 << shift)) == 0)
13265 shift += 2;
13266 else
13268 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
13269 mi_op, this_regno, this_regno,
13270 mi_delta & (0xff << shift));
13271 mi_delta &= ~(0xff << shift);
13272 shift += 8;
13275 fputs ("\tb\t", file);
13276 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
13277 if (NEED_PLT_RELOC)
13278 fputs ("(PLT)", file);
13279 fputc ('\n', file);
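/* Illustrative sketch (not from the sources; printf stands in for
   asm_fprintf and r0 for the `this' register): the loop above splits
   |DELTA| into 8-bit chunks aligned on even bit positions, matching
   the ARM add/sub immediate encoding of an 8-bit value rotated by an
   even amount.  */
static void
example_emit_delta (int delta)
{
  const char *op = delta < 0 ? "sub" : "add";
  unsigned int rest = delta < 0 ? -delta : delta;
  int shift = 0;

  while (rest != 0)
    {
      if ((rest & (3u << shift)) == 0)
	shift += 2;		/* skip an empty bit pair */
      else
	{
	  printf ("\t%s\tr0, r0, #%u\n", op, rest & (0xffu << shift));
	  rest &= ~(0xffu << shift);
	  shift += 8;
	}
    }
}
/* e.g. delta == 0x12345 emits #0x45, #0x2300 and #0x10000.  */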
13282 int
13283 arm_emit_vector_const (FILE *file, rtx x)
13285 int i;
13286 const char * pattern;
13288 if (GET_CODE (x) != CONST_VECTOR)
13289 abort ();
13291 switch (GET_MODE (x))
13293 case V2SImode: pattern = "%08x"; break;
13294 case V4HImode: pattern = "%04x"; break;
13295 case V8QImode: pattern = "%02x"; break;
13296 default: abort ();
13299 fprintf (file, "0x");
13300 for (i = CONST_VECTOR_NUNITS (x); i--;)
13302 rtx element;
13304 element = CONST_VECTOR_ELT (x, i);
13305 fprintf (file, pattern, INTVAL (element));
13308 return 1;
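/* Illustrative note: lanes are emitted from the highest-numbered one
   down, zero-padded to the lane width; e.g. a V4HImode vector with
   lanes {1, 2, 3, 4} prints as 0x0004000300020001.  */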
13311 const char *
13312 arm_output_load_gr (rtx *operands)
13314 rtx reg;
13315 rtx offset;
13316 rtx wcgr;
13317 rtx sum;
13319 if (GET_CODE (operands [1]) != MEM
13320 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
13321 || GET_CODE (reg = XEXP (sum, 0)) != REG
13322 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
13323 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
13324 return "wldrw%?\t%0, %1";
13326 /* Fix up an out-of-range load of a GR register. */
13327 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
13328 wcgr = operands[0];
13329 operands[0] = reg;
13330 output_asm_insn ("ldr%?\t%0, %1", operands);
13332 operands[0] = wcgr;
13333 operands[1] = reg;
13334 output_asm_insn ("tmcr%?\t%0, %1", operands);
13335 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
13337 return "";
13340 static rtx
13341 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
13342 int incoming ATTRIBUTE_UNUSED)
13344 #if 0
13345 /* FIXME: The ARM backend has special code to handle structure
13346 returns, and will reserve its own hidden first argument. So
13347 if this macro is enabled a *second* hidden argument will be
13348 reserved, which will break binary compatibility with old
13349 toolchains and also thunk handling. One day this should be
13350 fixed. */
13351 return 0;
13352 #else
13353 /* Register in which address to store a structure value
13354 is passed to a function. */
13355 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
13356 #endif
13359 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
13361 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
13362 named arg and all anonymous args onto the stack.
13363 XXX I know the prologue shouldn't be pushing registers, but it is faster
13364 that way. */
13366 static void
13367 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
13368 enum machine_mode mode ATTRIBUTE_UNUSED,
13369 tree type ATTRIBUTE_UNUSED,
13370 int *pretend_size,
13371 int second_time ATTRIBUTE_UNUSED)
13373 cfun->machine->uses_anonymous_args = 1;
13374 if (cum->nregs < NUM_ARG_REGS)
13375 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
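/* Illustrative note: e.g. for  void f (int n, ...)  one argument
   register is consumed by N, so CUM->nregs == 1 and the prologue
   pushes r1-r3, giving *PRETEND_SIZE == 12.  */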