/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
			     rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static void init_fpa_table (void);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int, rtx);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int arm_use_dfa_pipeline_interface (void);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);

/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
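
/* Any hook not overridden above keeps its default from TARGET_INITIALIZER.
   For example, when the scheduler asks the target for a dependence cost it
   calls targetm.sched.adjust_cost, which for this backend resolves to
   arm_adjust_cost via the #define above.  */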

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* What type of floating point are we tuning for?  */
enum fputype arm_fpu_tune;

/* What type of floating point instructions are available?  */
enum fputype arm_fpu_arch;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfp=... option.  */
const char * target_fp_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_FAST_MULT  (1 << 1)        /* Fast multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */
#define FL_ARCH6J     (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  Note - there is an overlap with FL_FAST_MULT.  For some
   hardware we want to be able to generate the multiply instructions, but to
   tune as if they were not present in the architecture.  */
static unsigned long tune_flags = 0;
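
/* For example, "-mcpu=arm7m -mtune=arm7" leaves FL_FAST_MULT set in
   insn_flags (so MUL/MLA may be emitted) while clearing it from
   tune_flags (so the scheduler costs the code as if multiply hardware
   were absent).  */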

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this is an "M" variant of the processor.  */
int arm_fast_multiply = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if this chip is a Cirrus/DSP.  */
int arm_is_cirrus = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  const unsigned long flags;
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */

  {"arm2",      FL_CO_PROC | FL_MODE26 },
  {"arm250",    FL_CO_PROC | FL_MODE26 },
  {"arm3",      FL_CO_PROC | FL_MODE26 },
  {"arm6",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm60",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm600",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm610",    FL_MODE26 | FL_MODE32 },
  {"arm620",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7",      FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  /* arm7m doesn't exist on its own, but only with D (and I), but
     those don't alter the code, so arm7m is sometimes used.  */
  {"arm7m",     FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7d",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dm",    FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm7di",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm7dmi",   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  {"arm70",     FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm700i",   FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  {"arm710",    FL_MODE26 | FL_MODE32 },
  {"arm720",    FL_MODE26 | FL_MODE32 },
  {"arm710c",   FL_MODE26 | FL_MODE32 },
  {"arm7100",   FL_MODE26 | FL_MODE32 },
  {"arm7500",   FL_MODE26 | FL_MODE32 },
  /* Doesn't have an external co-proc, but does have embedded fpa.  */
  {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  /* V4 Architecture Processors */
  {"arm7tdmi",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm710t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm720t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm740t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  {"arm8",      FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm810",    FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm9",      FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm920",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"arm920t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm940t",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9tdmi",  FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
  {"arm9e",     FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
  {"ep9312",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
  {"strongarm",     FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm110",  FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  {"strongarm1110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
  /* V5 Architecture Processors */
  {"arm10tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"arm1020t",  FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_ARCH5 },
  {"arm926ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
  {"arm1026ejs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
  {"xscale",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE },
  {"iwmmxt",    FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
  /* V6 Architecture Processors */
  {"arm1136js", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
  {"arm1136jfs", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J | FL_VFPV2 },
  {NULL, 0}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */

  { "armv2",    FL_CO_PROC | FL_MODE26 },
  { "armv2a",   FL_CO_PROC | FL_MODE26 },
  { "armv3",    FL_CO_PROC | FL_MODE26 | FL_MODE32 },
  { "armv3m",   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
  { "armv4",    FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",   FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
  { "armv5",    FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5t",   FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
  { "armv5te",  FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
  { "armv6j",   FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6J },
  { "ep9312",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS },
  { "iwmmxt",   FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT },
  { NULL, 0 }
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
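
/* For example, "-mcpu=arm920t" makes the option machinery point
   arm_select[0].string at "arm920t"; arm_override_options below then
   looks that name up in all_cores to obtain the capability flags for
   code generation, and in arm_select[2] (-mtune=) for scheduling.  */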

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
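
/* A worked example of the trick above (Kernighan's method): for
   value == 0x2c (binary 101100) the loop runs three times,
   0x2c -> 0x28 -> 0x20 -> 0, so bit_count returns 3 - one iteration
   per set bit rather than one per bit position.  */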

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		if (i == 2)
		  tune_flags = sel->flags;
		else
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning ("switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      static const struct cpu_default
      {
	const int cpu;
	const char *const name;
      }
      cpu_defaults[] =
      {
	{ TARGET_CPU_arm2,        "arm2" },
	{ TARGET_CPU_arm6,        "arm6" },
	{ TARGET_CPU_arm610,      "arm610" },
	{ TARGET_CPU_arm710,      "arm710" },
	{ TARGET_CPU_arm7m,       "arm7m" },
	{ TARGET_CPU_arm7500fe,   "arm7500fe" },
	{ TARGET_CPU_arm7tdmi,    "arm7tdmi" },
	{ TARGET_CPU_arm8,        "arm8" },
	{ TARGET_CPU_arm810,      "arm810" },
	{ TARGET_CPU_arm9,        "arm9" },
	{ TARGET_CPU_strongarm,   "strongarm" },
	{ TARGET_CPU_xscale,      "xscale" },
	{ TARGET_CPU_ep9312,      "ep9312" },
	{ TARGET_CPU_iwmmxt,      "iwmmxt" },
	{ TARGET_CPU_arm926ej_s,  "arm926ej-s" },
	{ TARGET_CPU_arm1026ej_s, "arm1026ej-s" },
	{ TARGET_CPU_arm1136j_s,  "arm1136j_s" },
	{ TARGET_CPU_arm1136jf_s, "arm1136jf_s" },
	{ TARGET_CPU_generic,     "arm" },
	{ 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
	abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
	if (streq (def->name, sel->name))
	  break;

      if (sel->name == NULL)
	abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
	 switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* Force apcs-32 to be used for interworking.  */
	  target_flags |= ARM_FLAG_APCS_32;

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}
      else if (!TARGET_APCS_32)
	sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 Unfortunately this does not work with multilibing.  We
		 need to be able to support multilibs for -mapcs-26 and for
		 -mthumb-interwork and there is no CPU that can support both
		 options.  Instead if we cannot find a cpu that has both the
		 characteristics of the default cpu and the given command line
		 options we scan the array again looking for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      if (best_fit == NULL)
		abort ();
	      else
		sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}
    }

  /* If tuning has not been specified, tune for whichever processor or
     architecture has been selected.  */
  if (tune_flags == 0)
    tune_flags = insn_flags;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
	 user, so issue a warning message.  If the user has specified
	 "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
	warning ("target CPU does not support APCS-32");
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
	warning ("interworking forces APCS-32 to be used");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_fast_multiply = (insn_flags & FL_FAST_MULT) != 0;
  arm_arch4         = (insn_flags & FL_ARCH4) != 0;
  arm_arch5         = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e        = (insn_flags & FL_ARCH5E) != 0;
  arm_arch_xscale   = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched      = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong     = (tune_flags & FL_STRONG) != 0;
  thumb_code        = (TARGET_ARM == 0);
  arm_is_6_or_7     = (((tune_flags & (FL_MODE26 | FL_MODE32))
			&& !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale   = (tune_flags & FL_XSCALE) != 0;
  arm_is_cirrus     = (tune_flags & FL_CIRRUS) != 0;
  arm_arch_iwmmxt   = (insn_flags & FL_IWMMXT) != 0;

  if (TARGET_IWMMXT && (! TARGET_ATPCS))
    target_flags |= ARM_FLAG_ATPCS;

  if (arm_is_cirrus)
    {
      arm_fpu_tune = FPUTYPE_MAVERICK;

      /* Ignore -mhard-float if -mcpu=ep9312.  */
      if (TARGET_HARD_FLOAT)
	target_flags ^= ARM_FLAG_SOFT_FLOAT;
    }
  else
    /* Default value for floating point code... if no co-processor
       bus, then schedule for emulated floating point.  Otherwise,
       assume the user has an FPA.
       Note: this does not prevent use of floating point instructions,
       -msoft-float does that.  */
    arm_fpu_tune = (tune_flags & FL_CO_PROC) ? FPUTYPE_FPA : FPUTYPE_FPA_EMU3;

  if (target_fp_name)
    {
      if (streq (target_fp_name, "2"))
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
      else if (streq (target_fp_name, "3"))
	arm_fpu_arch = FPUTYPE_FPA_EMU3;
      else
	error ("invalid floating point emulation option: -mfpe-%s",
	       target_fp_name);
    }
  else
    arm_fpu_arch = FPUTYPE_DEFAULT;

  if (TARGET_FPE)
    {
      if (arm_fpu_tune == FPUTYPE_FPA_EMU3)
	arm_fpu_tune = FPUTYPE_FPA_EMU2;
      else if (arm_fpu_tune == FPUTYPE_MAVERICK)
	warning ("-mfpe switch not supported by ep9312 target cpu - ignored.");
      else if (arm_fpu_tune != FPUTYPE_FPA)
	arm_fpu_tune = FPUTYPE_FPA_EMU2;
    }

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT || arm_fpu_tune != FPUTYPE_FPA)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
	arm_structure_size_boundary = size;
      else
	warning ("structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
	 experiments seem to show that in pathological cases a setting of
	 1 degrades less severely than a setting of 2.  This could change if
	 other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_is_strong)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
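
/* For example, a handler declared as

       void handler (void) __attribute__ ((interrupt ("IRQ")));

   arrives here with ARGUMENT holding the STRING_CST "IRQ" and is mapped
   to ARM_FT_ISR by the table above; a bare __attribute__ ((interrupt))
   takes the NULL_TREE fast path and also defaults to ARM_FT_ISR.  */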

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
	a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
	type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
	type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ...  */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value (always true
	 until we implement return-in-regs), or for a tail-call
	 argument ...  */
      if (sibling)
	{
	  if (GET_CODE (sibling) != CALL_INSN)
	    abort ();

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}
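
/* When this predicate holds, the whole epilogue can be one instruction,
   e.g. "mov pc, lr" when nothing was saved, or something like
   "ldmia sp!, {r4, r5, pc}" popping the saved registers and the return
   address together; otherwise a separate stack adjustment or register
   restore insn would be needed first.  */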

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
	return TRUE;
      mask =
	  (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
			 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
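
/* A valid ARM data-processing immediate is an 8-bit value rotated right
   by an even amount, which is what the rotating MASK test above checks.
   For example, 0xff, 0xff00 and 0xff000000 are all representable, while
   0x101 and 0xffff span more than eight bits and must be synthesized
   from several insns or loaded from memory.  */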

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
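
/* The complementary forms above work because the architecture has paired
   instructions: a PLUS of a negative constant can be emitted as SUB, and
   an AND with an inverted constant as BIC.  For example,
   "and r0, r1, #0xffffff00" is not encodable directly but is accepted
   here because it can be emitted as "bic r0, r1, #0xff".  */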

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode,
		    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesize
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
	  && (arm_gen_constant (code, mode, val, target, source, 1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE, all
		 the rest are dyadic.  */
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
	      return 1;
	    }
	  else
	    {
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_MINUS (mode, temp, source)));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx (code, mode, source, temp)));
	      return 2;
	    }
	}
    }

  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}
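
/* As an illustration, a SET of 0x0000fff0 (not a valid rotated 8-bit
   immediate) can be synthesized in two data-processing insns:

       mov  rD, #0xff00
       orr  rD, rD, #0xf0

   When the synthesis cost exceeds arm_constant_limit and arm_reorg has
   not yet run, the constant is instead emitted whole, to be pushed into
   a literal pool later and fetched with a PC-relative load.  */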

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
	i += 32;
      if (remainder & (3 << (i - 2)))
	{
	  end = i - 8;
	  if (end < 0)
	    end += 32;
	  temp1 = remainder & ((0x0ff << end)
			       | ((i < end) ? (0xff >> (32 - end)) : 0));
	  remainder &= ~temp1;
	  num_insns++;
	  i -= 6;
	}
      i -= 2;
    } while (remainder);
  return num_insns;
}
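
/* For example, with remainder == 0x00ffff00 the loop above peels off two
   8-bit chunks aligned on 2-bit boundaries (0x00ff0000 and 0x0000ff00),
   so the constant is counted as needing two insns.  */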

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode,
		  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
		  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    GEN_INT (ARM_SIGN_EXTEND (val))));
	  return 1;
	}
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      break;

    case AND:
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NOT (mode, source)));
	  return 1;
	}

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
	 passed as (source + (-val)).  */
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NEG (mode, source)));
	  return 1;
	}
      if (const_ok_for_arm (val))
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_MINUS (mode, GEN_INT (val),
						   source)));
	  return 1;
	}
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
	emit_insn (gen_rtx_SET (VOIDmode, target,
				(source ? gen_rtx (code, mode, source,
						   GEN_INT (val))
				 : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
	clear_sign_bit_copies++;
      else
	break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
	set_sign_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
	clear_zero_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
	set_zero_bit_copies++;
      else
	break;
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
	 to be negative.  This is a good way of doing it, since the shift
	 may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
	{
	  if (const_ok_for_arm
	      (temp1 = ARM_SIGN_EXTEND (remainder
					<< (set_sign_bit_copies - 1))))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	  /* For an inverted constant, we will need to set the low bits,
	     these will be shifted out of harm's way.  */
	  temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
	  if (const_ok_for_arm (~temp1))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	}

      /* See if we can generate this by setting the bottom (or the top)
	 16 bits, and then shifting these into the other half of the
	 word.  We only look for the simplest cases, to do more would cost
	 too much.  Be careful, however, not to generate this when the
	 alternative would take fewer insns.  */
      if (val & 0xffff0000)
	{
	  temp1 = remainder & 0xffff0000;
	  temp2 = remainder & 0x0000ffff;

	  /* Overlaps outside this range are best done using other methods.  */
	  for (i = 9; i < 24; i++)
	    {
	      if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
		  && !const_ok_for_arm (temp2))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp2, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn (gen_rtx_SET
			       (VOIDmode, target,
				gen_rtx_IOR (mode,
					     gen_rtx_ASHIFT (mode, source,
							     GEN_INT (i)),
					     source)));
		  return insns + 1;
		}
	    }

	  /* Don't duplicate cases already considered.  */
	  for (i = 17; i < 24; i++)
	    {
	      if (((temp1 | (temp1 >> i)) == remainder)
		  && !const_ok_for_arm (temp1))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp1, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn
		      (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_IOR
				    (mode,
				     gen_rtx_LSHIFTRT (mode, source,
						       GEN_INT (i)),
				     source)));
		  return insns + 1;
		}
	    }
	}
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
	 single instruction, and we can find a temporary to put it in,
	 then this can be done in two instructions instead of 3-4.  */
      if (subtargets
	  /* TARGET can't be NULL if SUBTARGETS is 0.  */
	  || (reload_completed && !reg_mentioned_p (target, source)))
	{
	  if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
	    {
	      if (generate)
		{
		  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

		  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
		  emit_insn (gen_rtx_SET (VOIDmode, target,
					  gen_rtx (code, mode, source, sub)));
		}
	      return 2;
	    }
	}

      if (code == XOR)
	break;

      if (set_sign_bit_copies > 8
	  && (val & (-1 << (32 - set_sign_bit_copies))) == val)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_sign_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode,
								   source,
								   shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode, sub,
								     shift))));
	    }
	  return 2;
	}

      if (set_zero_bit_copies > 8
	  && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_zero_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode,
								     source,
								     shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode, sub,
								   shift))));
	    }
	  return 2;
	}

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode, source)));
	      source = sub;
	      if (subtargets)
		sub = gen_reg_rtx (mode);
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_AND (mode, source,
						   GEN_INT (temp1))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode, sub)));
	    }
	  return 3;
	}
      break;

    case AND:
      /* See if two shifts will do 2 or more insn's worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = ((0xffffffff
				       << (32 - clear_sign_bit_copies))
				      & 0xffffffff);

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_sign_bit_copies);

	      emit_insn (gen_ashlsi3 (new_src, source, shift));
	      emit_insn (gen_lshrsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_zero_bit_copies);

	      emit_insn (gen_lshrsi3 (new_src, source, shift));
	      emit_insn (gen_ashlsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary, we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
	int consecutive_zeros = 0;

	if (!(remainder & (3 << i)))
	  {
	    while ((i < 32) && !(remainder & (3 << i)))
	      {
		consecutive_zeros += 2;
		i += 2;
	      }
	    if (consecutive_zeros > best_consecutive_zeros)
	      {
		best_consecutive_zeros = consecutive_zeros;
		best_start = i - consecutive_zeros;
	      }
	    i -= 2;
	  }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

	       *((volatile int *)0xe0000100) = 1;
	       *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

		mov rA, #0xe0000000
		mov rB, #1
		str rB, [rA, #0x100]
		mov rB, #2
		str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
	&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
	&& (count_insns_for_constant (remainder, 0) <=
	    count_insns_for_constant (remainder, best_start)))
      best_start = 0;

    /* Now start emitting the insns.  */
    i = best_start;
    do
      {
	int end;

	if (i <= 0)
	  i += 32;
	if (remainder & (3 << (i - 2)))
	  {
	    end = i - 8;
	    if (end < 0)
	      end += 32;
	    temp1 = remainder & ((0x0ff << end)
				 | ((i < end) ? (0xff >> (32 - end)) : 0));
	    remainder &= ~temp1;

	    if (generate)
	      {
		rtx new_src, temp1_rtx;

		if (code == SET || code == MINUS)
		  {
		    new_src = (subtargets ? gen_reg_rtx (mode) : target);
		    if (can_invert && code != MINUS)
		      temp1 = ~temp1;
		  }
		else
		  {
		    if (remainder && subtargets)
		      new_src = gen_reg_rtx (mode);
		    else
		      new_src = target;
		    if (can_invert)
		      temp1 = ~temp1;
		    else if (can_negate)
		      temp1 = -temp1;
		  }

		temp1 = trunc_int_for_mode (temp1, mode);
		temp1_rtx = GEN_INT (temp1);

		if (code == SET)
		  ;
		else if (code == MINUS)
		  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
		else
		  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

		emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
		source = new_src;
	      }

	    if (code == SET)
	      {
		can_invert = 0;
		code = PLUS;
	      }
	    else if (code == MINUS)
	      code = PLUS;

	    insns++;
	    i -= 6;
	  }
	i -= 2;
      }
    while (remainder);
  }

  return insns;
}
1858 /* Canonicalize a comparison so that we are more likely to recognize it.
1859 This can be done for a few constant compares, where we can make the
1860 immediate value easier to load. */
1862 enum rtx_code
1863 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1865 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1867 switch (code)
1869 case EQ:
1870 case NE:
1871 return code;
1873 case GT:
1874 case LE:
1875 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1876 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1878 *op1 = GEN_INT (i + 1);
1879 return code == GT ? GE : LT;
1881 break;
1883 case GE:
1884 case LT:
1885 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1886 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1888 *op1 = GEN_INT (i - 1);
1889 return code == GE ? GT : LE;
1891 break;
1893 case GTU:
1894 case LEU:
1895 if (i != ~((unsigned HOST_WIDE_INT) 0)
1896 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1898 *op1 = GEN_INT (i + 1);
1899 return code == GTU ? GEU : LTU;
1901 break;
1903 case GEU:
1904 case LTU:
1905 if (i != 0
1906 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1908 *op1 = GEN_INT (i - 1);
1909 return code == GEU ? GTU : LEU;
1911 break;
1913 default:
1914 abort ();
1917 return code;
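/* For instance, 0xfff is not a valid ARM immediate but 0x1000 is, so
   the GT case above rewrites (GT x 0xfff) as (GE x 0x1000); the two
   tests are equivalent on integers and the new constant can be loaded
   in a single insn.  */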
1920 /* Decide whether a type should be returned in memory (true)
1921 or in a register (false). This is called by the macro
1922 RETURN_IN_MEMORY. */
1924 arm_return_in_memory (tree type)
1926 HOST_WIDE_INT size;
1928 if (!AGGREGATE_TYPE_P (type))
1929 /* All simple types are returned in registers. */
1930 return 0;
1932 size = int_size_in_bytes (type);
1934 if (TARGET_ATPCS)
1936 /* ATPCS returns aggregate types in memory only if they are
1937 larger than a word (or are variable size). */
1938 return (size < 0 || size > UNITS_PER_WORD);
1941 /* For the arm-wince targets we choose to be compatible with Microsoft's
1942 ARM and Thumb compilers, which always return aggregates in memory. */
1943 #ifndef ARM_WINCE
1944 /* All structures/unions bigger than one word are returned in memory.
1945 Also catch the case where int_size_in_bytes returns -1. In this case
1946 the aggregate is either huge or of variable size, and in either case
1947 we will want to return it via memory and not in a register. */
1948 if (size < 0 || size > UNITS_PER_WORD)
1949 return 1;
1951 if (TREE_CODE (type) == RECORD_TYPE)
1953 tree field;
1955 /* For a struct the APCS says that we only return in a register
1956 if the type is 'integer like' and every addressable element
1957 has an offset of zero. For practical purposes this means
1958 that the structure can have at most one non bit-field element
1959 and that this element must be the first one in the structure. */
1961 /* Find the first field, ignoring non FIELD_DECL things which will
1962 have been created by C++. */
1963 for (field = TYPE_FIELDS (type);
1964 field && TREE_CODE (field) != FIELD_DECL;
1965 field = TREE_CHAIN (field))
1966 continue;
1968 if (field == NULL)
1969 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
1971 /* Check that the first field is valid for returning in a register. */
1973 /* ... Floats are not allowed. */
1974 if (FLOAT_TYPE_P (TREE_TYPE (field)))
1975 return 1;
1977 /* ... Aggregates that are not themselves valid for returning in
1978 a register are not allowed. */
1979 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
1980 return 1;
1982 /* Now check the remaining fields, if any. Only bitfields are allowed,
1983 since they are not addressable. */
1984 for (field = TREE_CHAIN (field);
1985 field;
1986 field = TREE_CHAIN (field))
1988 if (TREE_CODE (field) != FIELD_DECL)
1989 continue;
1991 if (!DECL_BIT_FIELD_TYPE (field))
1992 return 1;
1995 return 0;
1998 if (TREE_CODE (type) == UNION_TYPE)
2000 tree field;
2002 /* Unions can be returned in registers if every element is
2003 integral, or can be returned in an integer register. */
2004 for (field = TYPE_FIELDS (type);
2005 field;
2006 field = TREE_CHAIN (field))
2008 if (TREE_CODE (field) != FIELD_DECL)
2009 continue;
2011 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2012 return 1;
2014 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2015 return 1;
2018 return 0;
2020 #endif /* not ARM_WINCE */
2022 /* Return all other types in memory. */
2023 return 1;
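/* Illustrative consequences of the rules above (non-WinCE, APCS):

   struct s1 { int i; };          -- returned in a register
   struct s2 { int i, j; };       -- memory: larger than one word
   struct s3 { float f; };        -- memory: first field is a float
   union u1 { int i; char c; };   -- register: all members integral  */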
2026 /* Indicate whether or not words of a double are in big-endian order. */
2029 arm_float_words_big_endian (void)
2031 if (TARGET_CIRRUS)
2032 return 0;
2034 /* For FPA, float words are always big-endian. For VFP, float words
2035 follow the memory system mode. */
2037 if (TARGET_HARD_FLOAT)
2039 /* FIXME: TARGET_HARD_FLOAT currently implies FPA. */
2040 return 1;
2043 if (TARGET_VFP)
2044 return (TARGET_BIG_END ? 1 : 0);
2046 return 1;
2049 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2050 for a call to a function whose data type is FNTYPE.
2051 For a library call, FNTYPE is NULL. */
2052 void
2053 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2054 rtx libname ATTRIBUTE_UNUSED,
2055 tree fndecl ATTRIBUTE_UNUSED)
2057 /* On the ARM, the offset starts at 0. */
2058 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2059 pcum->iwmmxt_nregs = 0;
2061 pcum->call_cookie = CALL_NORMAL;
2063 if (TARGET_LONG_CALLS)
2064 pcum->call_cookie = CALL_LONG;
2066 /* Check for long call/short call attributes. The attributes
2067 override any command line option. */
2068 if (fntype)
2070 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2071 pcum->call_cookie = CALL_SHORT;
2072 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2073 pcum->call_cookie = CALL_LONG;
2076 /* Varargs vectors are treated the same as long long.
2077 named_count avoids having to change the way arm handles 'named'. */
2078 pcum->named_count = 0;
2079 pcum->nargs = 0;
2081 if (TARGET_REALLY_IWMMXT && fntype)
2083 tree fn_arg;
2085 for (fn_arg = TYPE_ARG_TYPES (fntype);
2086 fn_arg;
2087 fn_arg = TREE_CHAIN (fn_arg))
2088 pcum->named_count += 1;
2090 if (! pcum->named_count)
2091 pcum->named_count = INT_MAX;
2095 /* Determine where to put an argument to a function.
2096 Value is zero to push the argument on the stack,
2097 or a hard register in which to store the argument.
2099 MODE is the argument's machine mode.
2100 TYPE is the data type of the argument (as a tree).
2101 This is null for libcalls where that information may
2102 not be available.
2103 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2104 the preceding args and about the function being called.
2105 NAMED is nonzero if this argument is a named parameter
2106 (otherwise it is an extra parameter matching an ellipsis). */
2109 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2110 tree type ATTRIBUTE_UNUSED, int named)
2112 if (TARGET_REALLY_IWMMXT)
2114 if (VECTOR_MODE_SUPPORTED_P (mode))
2116 /* Varargs vectors are treated the same as long long.
2117 named_count avoids having to change the way arm handles 'named'. */
2118 if (pcum->named_count <= pcum->nargs + 1)
2120 if (pcum->nregs == 1)
2121 pcum->nregs += 1;
2122 if (pcum->nregs <= 2)
2123 return gen_rtx_REG (mode, pcum->nregs);
2124 else
2125 return NULL_RTX;
2127 else if (pcum->iwmmxt_nregs <= 9)
2128 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2129 else
2130 return NULL_RTX;
2132 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2133 pcum->nregs += 1;
2136 if (mode == VOIDmode)
2137 /* Compute operand 2 of the call insn. */
2138 return GEN_INT (pcum->call_cookie);
2140 if (!named || pcum->nregs >= NUM_ARG_REGS)
2141 return NULL_RTX;
2143 return gen_rtx_REG (mode, pcum->nregs);
2146 /* Variable sized types are passed by reference. This is a GCC
2147 extension to the ARM ABI. */
2150 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2151 enum machine_mode mode ATTRIBUTE_UNUSED,
2152 tree type, int named ATTRIBUTE_UNUSED)
2154 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2157 /* Implement va_arg. */
2160 arm_va_arg (tree valist, tree type)
2162 /* Variable sized types are passed by reference. */
2163 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2165 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2166 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2169 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2171 tree minus_eight;
2172 tree t;
2174 /* Maintain 64-bit alignment of the valist pointer by
2175 constructing: valist = ((valist + (8 - 1)) & -8). */
2176 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2177 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2178 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2179 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2180 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2181 TREE_SIDE_EFFECTS (t) = 1;
2182 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2184 /* This is to stop the combine pass optimizing
2185 away the alignment adjustment. */
2186 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2189 return std_expand_builtin_va_arg (valist, type);
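/* A concrete instance of the adjustment above: if valist is 0x1004,
   then (0x1004 + 7) & -8 == 0x1008, restoring the 64-bit alignment
   that an iWMMXt vector argument requires.  */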
2192 /* Encode the current state of the #pragma [no_]long_calls. */
2193 typedef enum
2195 OFF, /* No #pragma [no_]long_calls is in effect. */
2196 LONG, /* #pragma long_calls is in effect. */
2197 SHORT /* #pragma no_long_calls is in effect. */
2198 } arm_pragma_enum;
2200 static arm_pragma_enum arm_pragma_long_calls = OFF;
2202 void
2203 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2205 arm_pragma_long_calls = LONG;
2208 void
2209 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2211 arm_pragma_long_calls = SHORT;
2214 void
2215 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2217 arm_pragma_long_calls = OFF;
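/* Typical use of these handlers from user code:

   #pragma long_calls
   void far_func (void);      -- gets the long_call attribute
   #pragma no_long_calls
   void near_func (void);     -- gets the short_call attribute
   #pragma long_calls_off
   void plain_func (void);    -- back to the command line default

   The attributes themselves are attached by
   arm_set_default_type_attributes, below.  */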
2220 /* Table of machine attributes. */
2221 const struct attribute_spec arm_attribute_table[] =
2223 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2224 /* Function calls made to this symbol must be done indirectly, because
2225 it may lie outside of the 26 bit addressing range of a normal function
2226 call. */
2227 { "long_call", 0, 0, false, true, true, NULL },
2228 /* Whereas these functions are always known to reside within the 26 bit
2229 addressing range. */
2230 { "short_call", 0, 0, false, true, true, NULL },
2231 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2232 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2233 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2234 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2235 #ifdef ARM_PE
2236 /* ARM/PE has three new attributes:
2237 interfacearm - ?
2238 dllexport - for exporting a function/variable that will live in a dll
2239 dllimport - for importing a function/variable from a dll
2241 Microsoft allows multiple declspecs in one __declspec, separating
2242 them with spaces. We do NOT support this. Instead, use __declspec
2243 multiple times.
2245 { "dllimport", 0, 0, true, false, false, NULL },
2246 { "dllexport", 0, 0, true, false, false, NULL },
2247 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2248 #endif
2249 { NULL, 0, 0, false, false, false, NULL }
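/* Example declarations using the attributes above:

   void far_away (void) __attribute__ ((long_call));
   void timer_tick (void) __attribute__ ((interrupt ("IRQ")));
   void thunk (void) __attribute__ ((naked));  */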
2252 /* Handle an attribute requiring a FUNCTION_DECL;
2253 arguments as in struct attribute_spec.handler. */
2254 static tree
2255 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2256 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2258 if (TREE_CODE (*node) != FUNCTION_DECL)
2260 warning ("`%s' attribute only applies to functions",
2261 IDENTIFIER_POINTER (name));
2262 *no_add_attrs = true;
2265 return NULL_TREE;
2268 /* Handle an "interrupt" or "isr" attribute;
2269 arguments as in struct attribute_spec.handler. */
2270 static tree
2271 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2272 bool *no_add_attrs)
2274 if (DECL_P (*node))
2276 if (TREE_CODE (*node) != FUNCTION_DECL)
2278 warning ("`%s' attribute only applies to functions",
2279 IDENTIFIER_POINTER (name));
2280 *no_add_attrs = true;
2282 /* FIXME: the argument, if any, is checked for type attributes;
2283 should it be checked for decl ones? */
2285 else
2287 if (TREE_CODE (*node) == FUNCTION_TYPE
2288 || TREE_CODE (*node) == METHOD_TYPE)
2290 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2292 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2293 *no_add_attrs = true;
2296 else if (TREE_CODE (*node) == POINTER_TYPE
2297 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2298 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2299 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2301 *node = build_type_copy (*node);
2302 TREE_TYPE (*node) = build_type_attribute_variant
2303 (TREE_TYPE (*node),
2304 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2305 *no_add_attrs = true;
2307 else
2309 /* Possibly pass this attribute on from the type to a decl. */
2310 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2311 | (int) ATTR_FLAG_FUNCTION_NEXT
2312 | (int) ATTR_FLAG_ARRAY_NEXT))
2314 *no_add_attrs = true;
2315 return tree_cons (name, args, NULL_TREE);
2317 else
2319 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2324 return NULL_TREE;
2327 /* Return 0 if the attributes for two types are incompatible, 1 if they
2328 are compatible, and 2 if they are nearly compatible (which causes a
2329 warning to be generated). */
2330 static int
2331 arm_comp_type_attributes (tree type1, tree type2)
2333 int l1, l2, s1, s2;
2335 /* Check for mismatch of non-default calling convention. */
2336 if (TREE_CODE (type1) != FUNCTION_TYPE)
2337 return 1;
2339 /* Check for mismatched call attributes. */
2340 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2341 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2342 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2343 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2345 /* Only bother to check if an attribute is defined. */
2346 if (l1 | l2 | s1 | s2)
2348 /* If one type has an attribute, the other must have the same attribute. */
2349 if ((l1 != l2) || (s1 != s2))
2350 return 0;
2352 /* Disallow mixed attributes. */
2353 if ((l1 & s2) || (l2 & s1))
2354 return 0;
2357 /* Check for mismatched ISR attribute. */
2358 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2359 if (! l1)
2360 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2361 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2362 if (! l2)
2363 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2364 if (l1 != l2)
2365 return 0;
2367 return 1;
2370 /* Encode long_call or short_call attribute by prefixing
2371 symbol name in DECL with a special character FLAG. */
2372 void
2373 arm_encode_call_attribute (tree decl, int flag)
2375 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2376 int len = strlen (str);
2377 char * newstr;
2379 /* Do not allow weak functions to be treated as short call. */
2380 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2381 return;
2383 newstr = alloca (len + 2);
2384 newstr[0] = flag;
2385 strcpy (newstr + 1, str);
2387 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2388 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
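/* So a function "foo" carrying one of these attributes gets the
   assembler name "<FLAG>foo"; the ENCODED_SHORT_CALL_ATTR_P and
   ENCODED_LONG_CALL_ATTR_P macros used elsewhere in this file simply
   test that first character.  */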
2391 /* Assigns default attributes to newly defined type. This is used to
2392 set short_call/long_call attributes for function types of
2393 functions defined inside corresponding #pragma scopes. */
2394 static void
2395 arm_set_default_type_attributes (tree type)
2397 /* Add __attribute__ ((long_call)) to all functions, when
2398 inside #pragma long_calls or __attribute__ ((short_call)),
2399 when inside #pragma no_long_calls. */
2400 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2402 tree type_attr_list, attr_name;
2403 type_attr_list = TYPE_ATTRIBUTES (type);
2405 if (arm_pragma_long_calls == LONG)
2406 attr_name = get_identifier ("long_call");
2407 else if (arm_pragma_long_calls == SHORT)
2408 attr_name = get_identifier ("short_call");
2409 else
2410 return;
2412 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2413 TYPE_ATTRIBUTES (type) = type_attr_list;
2417 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2418 defined within the current compilation unit. If this cannot be
2419 determined, then 0 is returned. */
2420 static int
2421 current_file_function_operand (rtx sym_ref)
2423 /* This is a bit of a fib. A function will have a short call flag
2424 applied to its name if it has the short call attribute, or it has
2425 already been defined within the current compilation unit. */
2426 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2427 return 1;
2429 /* The current function is always defined within the current compilation
2430 unit. If it is a weak definition, however, then this may not be the
2431 real definition of the function, and so we have to say no. */
2432 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2433 && !DECL_WEAK (current_function_decl))
2434 return 1;
2436 /* We cannot make the determination - default to returning 0. */
2437 return 0;
2440 /* Return nonzero if a 32 bit "long_call" should be generated for
2441 this call. We generate a long_call if the function:
2443 a. has an __attribute__ ((long_call))
2444 or b. is within the scope of a #pragma long_calls
2445 or c. the -mlong-calls command line switch has been specified
2447 However we do not generate a long call if the function:
2449 d. has an __attribute__ ((short_call))
2450 or e. is inside the scope of a #pragma no_long_calls
2451 or f. has an __attribute__ ((section))
2452 or g. is defined within the current compilation unit.
2454 This function will be called by C fragments contained in the machine
2455 description file. CALL_REF and CALL_COOKIE correspond to the matched
2456 rtl operands. CALL_SYMBOL is used to distinguish between
2457 two different callers of the function. It is set to 1 in the
2458 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2459 and "call_value" patterns. This is because of the difference in the
2460 SYM_REFs passed by these patterns. */
2462 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2464 if (!call_symbol)
2466 if (GET_CODE (sym_ref) != MEM)
2467 return 0;
2469 sym_ref = XEXP (sym_ref, 0);
2472 if (GET_CODE (sym_ref) != SYMBOL_REF)
2473 return 0;
2475 if (call_cookie & CALL_SHORT)
2476 return 0;
2478 if (TARGET_LONG_CALLS && flag_function_sections)
2479 return 1;
2481 if (current_file_function_operand (sym_ref))
2482 return 0;
2484 return (call_cookie & CALL_LONG)
2485 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2486 || TARGET_LONG_CALLS;
2489 /* Return nonzero if it is ok to make a tail-call to DECL. */
2490 static bool
2491 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2493 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2495 if (cfun->machine->sibcall_blocked)
2496 return false;
2498 /* Never tailcall something for which we have no decl, or if we
2499 are in Thumb mode. */
2500 if (decl == NULL || TARGET_THUMB)
2501 return false;
2503 /* Get the calling method. */
2504 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2505 call_type = CALL_SHORT;
2506 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2507 call_type = CALL_LONG;
2509 /* Cannot tail-call to long calls, since these are out of range of
2510 a branch instruction. However, if not compiling PIC, we know
2511 we can reach the symbol if it is in this compilation unit. */
2512 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2513 return false;
2515 /* If we are interworking and the function is not declared static
2516 then we can't tail-call it unless we know that it exists in this
2517 compilation unit (since it might be a Thumb routine). */
2518 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2519 return false;
2521 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2522 if (IS_INTERRUPT (arm_current_func_type ()))
2523 return false;
2525 /* Everything else is ok. */
2526 return true;
2530 /* Addressing mode support functions. */
2532 /* Return nonzero if X is a legitimate immediate operand when compiling
2533 for PIC. */
2535 legitimate_pic_operand_p (rtx x)
2537 if (CONSTANT_P (x)
2538 && flag_pic
2539 && (GET_CODE (x) == SYMBOL_REF
2540 || (GET_CODE (x) == CONST
2541 && GET_CODE (XEXP (x, 0)) == PLUS
2542 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2543 return 0;
2545 return 1;
2549 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2551 if (GET_CODE (orig) == SYMBOL_REF
2552 || GET_CODE (orig) == LABEL_REF)
2554 #ifndef AOF_ASSEMBLER
2555 rtx pic_ref, address;
2556 #endif
2557 rtx insn;
2558 int subregs = 0;
2560 if (reg == 0)
2562 if (no_new_pseudos)
2563 abort ();
2564 else
2565 reg = gen_reg_rtx (Pmode);
2567 subregs = 1;
2570 #ifdef AOF_ASSEMBLER
2571 /* The AOF assembler can generate relocations for these directly, and
2572 understands that the PIC register has to be added into the offset. */
2573 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2574 #else
2575 if (subregs)
2576 address = gen_reg_rtx (Pmode);
2577 else
2578 address = reg;
2580 if (TARGET_ARM)
2581 emit_insn (gen_pic_load_addr_arm (address, orig));
2582 else
2583 emit_insn (gen_pic_load_addr_thumb (address, orig));
2585 if ((GET_CODE (orig) == LABEL_REF
2586 || (GET_CODE (orig) == SYMBOL_REF &&
2587 SYMBOL_REF_LOCAL_P (orig)))
2588 && NEED_GOT_RELOC)
2589 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2590 else
2592 pic_ref = gen_rtx_MEM (Pmode,
2593 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2594 address));
2595 RTX_UNCHANGING_P (pic_ref) = 1;
2598 insn = emit_move_insn (reg, pic_ref);
2599 #endif
2600 current_function_uses_pic_offset_table = 1;
2601 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2602 by the loop pass. */
2603 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2604 REG_NOTES (insn));
2605 return reg;
2607 else if (GET_CODE (orig) == CONST)
2609 rtx base, offset;
2611 if (GET_CODE (XEXP (orig, 0)) == PLUS
2612 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2613 return orig;
2615 if (reg == 0)
2617 if (no_new_pseudos)
2618 abort ();
2619 else
2620 reg = gen_reg_rtx (Pmode);
2623 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2625 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2626 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2627 base == reg ? 0 : reg);
2629 else
2630 abort ();
2632 if (GET_CODE (offset) == CONST_INT)
2634 /* The base register doesn't really matter; we only want to
2635 test the index for the appropriate mode. */
2636 if (!arm_legitimate_index_p (mode, offset, 0))
2638 if (!no_new_pseudos)
2639 offset = force_reg (Pmode, offset);
2640 else
2641 abort ();
2644 if (GET_CODE (offset) == CONST_INT)
2645 return plus_constant (base, INTVAL (offset));
2648 if (GET_MODE_SIZE (mode) > 4
2649 && (GET_MODE_CLASS (mode) == MODE_INT
2650 || TARGET_SOFT_FLOAT))
2652 emit_insn (gen_addsi3 (reg, base, offset));
2653 return reg;
2656 return gen_rtx_PLUS (Pmode, base, offset);
2659 return orig;
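/* For a global symbol under -fPIC the SYMBOL_REF case above typically
   expands to something like (ARM state, illustrative registers):

   ldr rN, .LCn          -- .LCn: .word sym(GOT)
   ldr rN, [sl, rN]      -- fetch the address from the GOT

   where sl is the PIC offset table register; local symbols and labels
   skip the GOT load and just add the PIC register to the offset.  */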
2662 /* Generate code to load the PIC register. PROLOGUE is true if
2663 called from arm_expand_prologue (in which case we want the
2664 generated insns at the start of the function); false if called
2665 by an exception receiver that needs the PIC register reloaded
2666 (in which case the insns are just dumped at the current location). */
2667 void
2668 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2670 #ifndef AOF_ASSEMBLER
2671 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2672 rtx global_offset_table;
2674 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2675 return;
2677 if (!flag_pic)
2678 abort ();
2680 start_sequence ();
2681 l1 = gen_label_rtx ();
2683 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2684 /* On the ARM the PC register contains 'dot + 8' at the time of the
2685 addition; on the Thumb it is 'dot + 4'. */
2686 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2687 if (GOT_PCREL)
2688 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2689 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2690 else
2691 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2693 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2695 if (TARGET_ARM)
2697 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2698 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2700 else
2702 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2703 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2706 seq = get_insns ();
2707 end_sequence ();
2708 if (prologue)
2709 emit_insn_after (seq, get_insns ());
2710 else
2711 emit_insn (seq);
2713 /* Need to emit this whether or not we obey regdecls,
2714 since setjmp/longjmp can cause life info to screw up. */
2715 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2716 #endif /* AOF_ASSEMBLER */
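/* The sequence constructed above looks roughly like this in ARM state:

   ldr sl, .Ln        -- .Ln: .word _GLOBAL_OFFSET_TABLE_ - (.LPICm + 8)
   .LPICm:
   add sl, pc, sl     -- pc reads as .LPICm + 8 here

   leaving sl pointing at the GOT wherever the code was loaded.  */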
2719 /* Return nonzero if X is valid as an ARM state addressing register. */
2720 static int
2721 arm_address_register_rtx_p (rtx x, int strict_p)
2723 int regno;
2725 if (GET_CODE (x) != REG)
2726 return 0;
2728 regno = REGNO (x);
2730 if (strict_p)
2731 return ARM_REGNO_OK_FOR_BASE_P (regno);
2733 return (regno <= LAST_ARM_REGNUM
2734 || regno >= FIRST_PSEUDO_REGISTER
2735 || regno == FRAME_POINTER_REGNUM
2736 || regno == ARG_POINTER_REGNUM);
2739 /* Return nonzero if X is a valid ARM state address operand. */
2741 arm_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2743 if (arm_address_register_rtx_p (x, strict_p))
2744 return 1;
2746 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2747 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2749 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2750 && GET_MODE_SIZE (mode) <= 4
2751 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2752 && GET_CODE (XEXP (x, 1)) == PLUS
2753 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2754 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), strict_p);
2756 /* After reload constants split into minipools will have addresses
2757 from a LABEL_REF. */
2758 else if (reload_completed
2759 && (GET_CODE (x) == LABEL_REF
2760 || (GET_CODE (x) == CONST
2761 && GET_CODE (XEXP (x, 0)) == PLUS
2762 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2763 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2764 return 1;
2766 else if (mode == TImode)
2767 return 0;
2769 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2771 if (GET_CODE (x) == PLUS
2772 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2773 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2775 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2777 if (val == 4 || val == -4 || val == -8)
2778 return 1;
2782 else if (GET_CODE (x) == PLUS)
2784 rtx xop0 = XEXP (x, 0);
2785 rtx xop1 = XEXP (x, 1);
2787 return ((arm_address_register_rtx_p (xop0, strict_p)
2788 && arm_legitimate_index_p (mode, xop1, strict_p))
2789 || (arm_address_register_rtx_p (xop1, strict_p)
2790 && arm_legitimate_index_p (mode, xop0, strict_p)));
2793 #if 0
2794 /* Reload currently can't handle MINUS, so disable this for now. */
2795 else if (GET_CODE (x) == MINUS)
2797 rtx xop0 = XEXP (x, 0);
2798 rtx xop1 = XEXP (x, 1);
2800 return (arm_address_register_rtx_p (xop0, strict_p)
2801 && arm_legitimate_index_p (mode, xop1, strict_p));
2803 #endif
2805 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2806 && GET_CODE (x) == SYMBOL_REF
2807 && CONSTANT_POOL_ADDRESS_P (x)
2808 && ! (flag_pic
2809 && symbol_mentioned_p (get_pool_constant (x))))
2810 return 1;
2812 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2813 && (GET_MODE_SIZE (mode) <= 4)
2814 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2815 return 1;
2817 return 0;
2820 /* Return nonzero if INDEX is valid for an address index operand in
2821 ARM state. */
2822 static int
2823 arm_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
2825 HOST_WIDE_INT range;
2826 enum rtx_code code = GET_CODE (index);
2828 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
2829 return (code == CONST_INT && INTVAL (index) < 1024
2830 && INTVAL (index) > -1024
2831 && (INTVAL (index) & 3) == 0);
2833 if (TARGET_CIRRUS
2834 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2835 return (code == CONST_INT
2836 && INTVAL (index) < 255
2837 && INTVAL (index) > -255);
2839 if (arm_address_register_rtx_p (index, strict_p)
2840 && GET_MODE_SIZE (mode) <= 4)
2841 return 1;
2843 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2844 return (code == CONST_INT
2845 && INTVAL (index) < 256
2846 && INTVAL (index) > -256);
2848 /* XXX What about ldrsb? */
2849 if (GET_MODE_SIZE (mode) <= 4 && code == MULT
2850 && (!arm_arch4 || (mode) != HImode))
2852 rtx xiop0 = XEXP (index, 0);
2853 rtx xiop1 = XEXP (index, 1);
2855 return ((arm_address_register_rtx_p (xiop0, strict_p)
2856 && power_of_two_operand (xiop1, SImode))
2857 || (arm_address_register_rtx_p (xiop1, strict_p)
2858 && power_of_two_operand (xiop0, SImode)));
2861 if (GET_MODE_SIZE (mode) <= 4
2862 && (code == LSHIFTRT || code == ASHIFTRT
2863 || code == ASHIFT || code == ROTATERT)
2864 && (!arm_arch4 || (mode) != HImode))
2866 rtx op = XEXP (index, 1);
2868 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2869 && GET_CODE (op) == CONST_INT
2870 && INTVAL (op) > 0
2871 && INTVAL (op) <= 31);
2874 /* XXX For ARM v4 we may be doing a sign-extend operation during the
2875 load, but that has a restricted addressing range and we are unable
2876 to tell here whether that is the case. To be safe we restrict all
2877 loads to that range. */
2878 range = ((mode) == HImode || (mode) == QImode)
2879 ? (arm_arch4 ? 256 : 4095) : 4096;
2881 return (code == CONST_INT
2882 && INTVAL (index) < range
2883 && INTVAL (index) > -range);
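/* Examples of SImode addresses accepted above (ARM state):

   [r0, r1]            -- register index
   [r0, r1, lsl #2]    -- scaled index, the power-of-two MULT form
   [r0, #4095]         -- immediate within +/-4095

   whereas an HImode access on arm_arch4 only gets [r0, #+/-255],
   matching the ldrh/strh addressing modes.  */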
2886 /* Return nonzero if X is valid as a Thumb state base register. */
2887 static int
2888 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
2890 int regno;
2892 if (GET_CODE (x) != REG)
2893 return 0;
2895 regno = REGNO (x);
2897 if (strict_p)
2898 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
2900 return (regno <= LAST_LO_REGNUM
2901 || regno > LAST_VIRTUAL_REGISTER
2902 || regno == FRAME_POINTER_REGNUM
2903 || (GET_MODE_SIZE (mode) >= 4
2904 && (regno == STACK_POINTER_REGNUM
2905 || regno >= FIRST_PSEUDO_REGISTER
2906 || x == hard_frame_pointer_rtx
2907 || x == arg_pointer_rtx)));
2910 /* Return nonzero if x is a legitimate index register. This is the case
2911 for any base register that can access a QImode object. */
2912 inline static int
2913 thumb_index_register_rtx_p (rtx x, int strict_p)
2915 return thumb_base_register_rtx_p (x, QImode, strict_p);
2918 /* Return nonzero if x is a legitimate Thumb-state address.
2920 The AP may be eliminated to either the SP or the FP, so we use the
2921 least common denominator, e.g. SImode, and offsets from 0 to 64.
2923 ??? Verify whether the above is the right approach.
2925 ??? Also, the FP may be eliminated to the SP, so perhaps that
2926 needs special handling also.
2928 ??? Look at how the mips16 port solves this problem. It probably uses
2929 better ways to solve some of these problems.
2931 Although it is not incorrect, we don't accept QImode and HImode
2932 addresses based on the frame pointer or arg pointer until the
2933 reload pass starts. This is so that eliminating such addresses
2934 into stack based ones won't produce impossible code. */
2936 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2938 /* ??? Not clear if this is right. Experiment. */
2939 if (GET_MODE_SIZE (mode) < 4
2940 && !(reload_in_progress || reload_completed)
2941 && (reg_mentioned_p (frame_pointer_rtx, x)
2942 || reg_mentioned_p (arg_pointer_rtx, x)
2943 || reg_mentioned_p (virtual_incoming_args_rtx, x)
2944 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
2945 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
2946 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
2947 return 0;
2949 /* Accept any base register. SP only in SImode or larger. */
2950 else if (thumb_base_register_rtx_p (x, mode, strict_p))
2951 return 1;
2953 /* This is PC relative data before arm_reorg runs. */
2954 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
2955 && GET_CODE (x) == SYMBOL_REF
2956 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
2957 return 1;
2959 /* This is PC relative data after arm_reorg runs. */
2960 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
2961 && (GET_CODE (x) == LABEL_REF
2962 || (GET_CODE (x) == CONST
2963 && GET_CODE (XEXP (x, 0)) == PLUS
2964 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2965 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2966 return 1;
2968 /* Post-inc indexing only supported for SImode and larger. */
2969 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
2970 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
2971 return 1;
2973 else if (GET_CODE (x) == PLUS)
2975 /* REG+REG address can be any two index registers. */
2976 /* We disallow FRAME+REG addressing since we know that FRAME
2977 will be replaced with STACK, and SP relative addressing only
2978 permits SP+OFFSET. */
2979 if (GET_MODE_SIZE (mode) <= 4
2980 && XEXP (x, 0) != frame_pointer_rtx
2981 && XEXP (x, 1) != frame_pointer_rtx
2982 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2983 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
2984 return 1;
2986 /* REG+const has 5-7 bit offset for non-SP registers. */
2987 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
2988 || XEXP (x, 0) == arg_pointer_rtx)
2989 && GET_CODE (XEXP (x, 1)) == CONST_INT
2990 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
2991 return 1;
2993 /* REG+const has 10 bit offset for SP, but only SImode and
2994 larger is supported. */
2995 /* ??? Should probably check for DI/DFmode overflow here
2996 just like GO_IF_LEGITIMATE_OFFSET does. */
2997 else if (GET_CODE (XEXP (x, 0)) == REG
2998 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
2999 && GET_MODE_SIZE (mode) >= 4
3000 && GET_CODE (XEXP (x, 1)) == CONST_INT
3001 && INTVAL (XEXP (x, 1)) >= 0
3002 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3003 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3004 return 1;
3006 else if (GET_CODE (XEXP (x, 0)) == REG
3007 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3008 && GET_MODE_SIZE (mode) >= 4
3009 && GET_CODE (XEXP (x, 1)) == CONST_INT
3010 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3011 return 1;
3014 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3015 && GET_MODE_SIZE (mode) == 4
3016 && GET_CODE (x) == SYMBOL_REF
3017 && CONSTANT_POOL_ADDRESS_P (x)
3018 && !(flag_pic
3019 && symbol_mentioned_p (get_pool_constant (x))))
3020 return 1;
3022 return 0;
3025 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3026 instruction of mode MODE. */
3028 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3030 switch (GET_MODE_SIZE (mode))
3032 case 1:
3033 return val >= 0 && val < 32;
3035 case 2:
3036 return val >= 0 && val < 64 && (val & 1) == 0;
3038 default:
3039 return (val >= 0
3040 && (val + GET_MODE_SIZE (mode)) <= 128
3041 && (val & 3) == 0);
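/* In concrete terms: QImode allows offsets 0-31, HImode even offsets
   0-62, and word (or larger) accesses word-aligned offsets that keep
   the whole access below 128 bytes -- e.g. 0-124 for a single SImode
   word. These correspond to the 5-bit scaled immediate fields of the
   Thumb load/store encodings.  */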
3045 /* Try machine-dependent ways of modifying an illegitimate address
3046 to be legitimate. If we find one, return the new, valid address. */
3048 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3050 if (GET_CODE (x) == PLUS)
3052 rtx xop0 = XEXP (x, 0);
3053 rtx xop1 = XEXP (x, 1);
3055 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3056 xop0 = force_reg (SImode, xop0);
3058 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3059 xop1 = force_reg (SImode, xop1);
3061 if (ARM_BASE_REGISTER_RTX_P (xop0)
3062 && GET_CODE (xop1) == CONST_INT)
3064 HOST_WIDE_INT n, low_n;
3065 rtx base_reg, val;
3066 n = INTVAL (xop1);
3068 if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
3070 low_n = n & 0x0f;
3071 n &= ~0x0f;
3072 if (low_n > 4)
3074 n += 16;
3075 low_n -= 16;
3078 else
3080 low_n = ((mode) == TImode ? 0
3081 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3082 n -= low_n;
3085 base_reg = gen_reg_rtx (SImode);
3086 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3087 GEN_INT (n)), NULL_RTX);
3088 emit_move_insn (base_reg, val);
3089 x = (low_n == 0 ? base_reg
3090 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3092 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3093 x = gen_rtx_PLUS (SImode, xop0, xop1);
3096 /* XXX We don't allow MINUS any more -- see comment in
3097 arm_legitimate_address_p (). */
3098 else if (GET_CODE (x) == MINUS)
3100 rtx xop0 = XEXP (x, 0);
3101 rtx xop1 = XEXP (x, 1);
3103 if (CONSTANT_P (xop0))
3104 xop0 = force_reg (SImode, xop0);
3106 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3107 xop1 = force_reg (SImode, xop1);
3109 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3110 x = gen_rtx_MINUS (SImode, xop0, xop1);
3113 if (flag_pic)
3115 /* We need to find and carefully transform any SYMBOL and LABEL
3116 references; so go back to the original address expression. */
3117 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3119 if (new_x != orig_x)
3120 x = new_x;
3123 return x;
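/* As an example of the PLUS handling above, the invalid SImode address
   (plus r1 (const_int 0x3004)) is rewritten as

   add rT, r1, #0x3000   -- 0x3000 is a valid immediate
   ... [rT, #4] ...      -- the remaining low part fits the insn

   with rT a fresh pseudo, leaving both components legitimate.  */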
3128 #define REG_OR_SUBREG_REG(X) \
3129 (GET_CODE (X) == REG \
3130 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3132 #define REG_OR_SUBREG_RTX(X) \
3133 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3135 #ifndef COSTS_N_INSNS
3136 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3137 #endif
3138 /* Worker routine for arm_rtx_costs. */
3139 static inline int
3140 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3142 enum machine_mode mode = GET_MODE (x);
3143 enum rtx_code subcode;
3144 int extra_cost;
3146 if (TARGET_THUMB)
3148 switch (code)
3150 case ASHIFT:
3151 case ASHIFTRT:
3152 case LSHIFTRT:
3153 case ROTATERT:
3154 case PLUS:
3155 case MINUS:
3156 case COMPARE:
3157 case NEG:
3158 case NOT:
3159 return COSTS_N_INSNS (1);
3161 case MULT:
3162 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3164 int cycles = 0;
3165 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3167 while (i)
3169 i >>= 2;
3170 cycles++;
3172 return COSTS_N_INSNS (2) + cycles;
3174 return COSTS_N_INSNS (1) + 16;
3176 case SET:
3177 return (COSTS_N_INSNS (1)
3178 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3179 + GET_CODE (SET_DEST (x)) == MEM));
3181 case CONST_INT:
3182 if (outer == SET)
3184 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3185 return 0;
3186 if (thumb_shiftable_const (INTVAL (x)))
3187 return COSTS_N_INSNS (2);
3188 return COSTS_N_INSNS (3);
3190 else if ((outer == PLUS || outer == COMPARE)
3191 && INTVAL (x) < 256 && INTVAL (x) > -256)
3192 return 0;
3193 else if (outer == AND
3194 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3195 return COSTS_N_INSNS (1);
3196 else if (outer == ASHIFT || outer == ASHIFTRT
3197 || outer == LSHIFTRT)
3198 return 0;
3199 return COSTS_N_INSNS (2);
3201 case CONST:
3202 case CONST_DOUBLE:
3203 case LABEL_REF:
3204 case SYMBOL_REF:
3205 return COSTS_N_INSNS (3);
3207 case UDIV:
3208 case UMOD:
3209 case DIV:
3210 case MOD:
3211 return 100;
3213 case TRUNCATE:
3214 return 99;
3216 case AND:
3217 case XOR:
3218 case IOR:
3219 /* XXX guess. */
3220 return 8;
3222 case ADDRESSOF:
3223 case MEM:
3224 /* XXX another guess. */
3225 /* Memory costs quite a lot for the first word, but subsequent words
3226 load at the equivalent of a single insn each. */
3227 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3228 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3229 ? 4 : 0));
3231 case IF_THEN_ELSE:
3232 /* XXX a guess. */
3233 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3234 return 14;
3235 return 2;
3237 case ZERO_EXTEND:
3238 /* XXX still guessing. */
3239 switch (GET_MODE (XEXP (x, 0)))
3241 case QImode:
3242 return (1 + (mode == DImode ? 4 : 0)
3243 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3245 case HImode:
3246 return (4 + (mode == DImode ? 4 : 0)
3247 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3249 case SImode:
3250 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3252 default:
3253 return 99;
3256 default:
3257 return 99;
3261 switch (code)
3263 case MEM:
3264 /* Memory costs quite a lot for the first word, but subsequent words
3265 load at the equivalent of a single insn each. */
3266 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3267 + (GET_CODE (x) == SYMBOL_REF
3268 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3270 case DIV:
3271 case MOD:
3272 case UDIV:
3273 case UMOD:
3274 return optimize_size ? COSTS_N_INSNS (2) : 100;
3276 case ROTATE:
3277 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3278 return 4;
3279 /* Fall through */
3280 case ROTATERT:
3281 if (mode != SImode)
3282 return 8;
3283 /* Fall through */
3284 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3285 if (mode == DImode)
3286 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3287 + ((GET_CODE (XEXP (x, 0)) == REG
3288 || (GET_CODE (XEXP (x, 0)) == SUBREG
3289 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3290 ? 0 : 8));
3291 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3292 || (GET_CODE (XEXP (x, 0)) == SUBREG
3293 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3294 ? 0 : 4)
3295 + ((GET_CODE (XEXP (x, 1)) == REG
3296 || (GET_CODE (XEXP (x, 1)) == SUBREG
3297 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3298 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3299 ? 0 : 4));
3301 case MINUS:
3302 if (mode == DImode)
3303 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3304 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3305 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3306 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3307 ? 0 : 8));
3309 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3310 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3311 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3312 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3313 ? 0 : 8)
3314 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3315 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3316 && const_double_rtx_ok_for_fpa (XEXP (x, 0))))
3317 ? 0 : 8));
3319 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3320 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3321 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3322 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3323 || subcode == ASHIFTRT || subcode == LSHIFTRT
3324 || subcode == ROTATE || subcode == ROTATERT
3325 || (subcode == MULT
3326 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3327 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3328 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3329 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3330 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3331 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3332 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3333 return 1;
3334 /* Fall through */
3336 case PLUS:
3337 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3338 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3339 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3340 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3341 && const_double_rtx_ok_for_fpa (XEXP (x, 1))))
3342 ? 0 : 8));
3344 /* Fall through */
3345 case AND: case XOR: case IOR:
3346 extra_cost = 0;
3348 /* Normally the frame registers will be spilt into reg+const during
3349 reload, so it is a bad idea to combine them with other instructions,
3350 since then they might not be moved outside of loops. As a compromise
3351 we allow integration with ops that have a constant as their second
3352 operand. */
3353 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3354 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3355 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3356 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3357 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3358 extra_cost = 4;
3360 if (mode == DImode)
3361 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3362 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3363 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3364 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3365 ? 0 : 8));
3367 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3368 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3369 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3370 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3371 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3372 ? 0 : 4));
3374 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3375 return (1 + extra_cost
3376 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3377 || subcode == LSHIFTRT || subcode == ASHIFTRT
3378 || subcode == ROTATE || subcode == ROTATERT
3379 || (subcode == MULT
3380 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3381 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3382 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3383 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3384 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3385 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3386 ? 0 : 4));
3388 return 8;
3390 case MULT:
3391 /* There is no point basing this on the tuning, since it is always the
3392 fast variant if it exists at all. */
3393 if (arm_fast_multiply && mode == DImode
3394 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3395 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3396 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3397 return 8;
3399 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3400 || mode == DImode)
3401 return 30;
3403 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3405 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3406 & (unsigned HOST_WIDE_INT) 0xffffffff);
3407 int cost, const_ok = const_ok_for_arm (i);
3408 int j, booth_unit_size;
3410 if (arm_tune_xscale)
3412 unsigned HOST_WIDE_INT masked_const;
3414 /* The cost will be related to two insns.
3415 First a load of the constant (MOV or LDR), then a multiply. */
3416 cost = 2;
3417 if (! const_ok)
3418 cost += 1; /* LDR is probably more expensive because
3419 of longer result latency. */
3420 masked_const = i & 0xffff8000;
3421 if (masked_const != 0 && masked_const != 0xffff8000)
3423 masked_const = i & 0xf8000000;
3424 if (masked_const == 0 || masked_const == 0xf8000000)
3425 cost += 1;
3426 else
3427 cost += 2;
3429 return cost;
3432 /* Tune as appropriate. */
3433 cost = const_ok ? 4 : 8;
3434 booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
3435 for (j = 0; i && j < 32; j += booth_unit_size)
3437 i >>= booth_unit_size;
3438 cost += 2;
3441 return cost;
3444 return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
3445 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3446 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
3448 case TRUNCATE:
3449 if (arm_fast_multiply && mode == SImode
3450 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3451 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3452 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3453 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3454 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3455 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3456 return 8;
3457 return 99;
3459 case NEG:
3460 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3461 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3462 /* Fall through */
3463 case NOT:
3464 if (mode == DImode)
3465 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3467 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3469 case IF_THEN_ELSE:
3470 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3471 return 14;
3472 return 2;
3474 case COMPARE:
3475 return 1;
3477 case ABS:
3478 return 4 + (mode == DImode ? 4 : 0);
3480 case SIGN_EXTEND:
3481 if (GET_MODE (XEXP (x, 0)) == QImode)
3482 return (4 + (mode == DImode ? 4 : 0)
3483 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3484 /* Fall through */
3485 case ZERO_EXTEND:
3486 switch (GET_MODE (XEXP (x, 0)))
3488 case QImode:
3489 return (1 + (mode == DImode ? 4 : 0)
3490 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3492 case HImode:
3493 return (4 + (mode == DImode ? 4 : 0)
3494 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3496 case SImode:
3497 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3499 case V8QImode:
3500 case V4HImode:
3501 case V2SImode:
3502 case V4QImode:
3503 case V2HImode:
3504 return 1;
3506 default:
3507 break;
3509 abort ();
3511 case CONST_INT:
3512 if (const_ok_for_arm (INTVAL (x)))
3513 return outer == SET ? 2 : -1;
3514 else if (outer == AND
3515 && const_ok_for_arm (~INTVAL (x)))
3516 return -1;
3517 else if ((outer == COMPARE
3518 || outer == PLUS || outer == MINUS)
3519 && const_ok_for_arm (-INTVAL (x)))
3520 return -1;
3521 else
3522 return 5;
3524 case CONST:
3525 case LABEL_REF:
3526 case SYMBOL_REF:
3527 return 6;
3529 case CONST_DOUBLE:
3530 if (const_double_rtx_ok_for_fpa (x))
3531 return outer == SET ? 2 : -1;
3532 else if ((outer == COMPARE || outer == PLUS)
3533 && neg_const_double_rtx_ok_for_fpa (x))
3534 return -1;
3535 return 7;
3537 default:
3538 return 99;
3542 static bool
3543 arm_rtx_costs (rtx x, int code, int outer_code, int *total)
3545 *total = arm_rtx_costs_1 (x, code, outer_code);
3546 return true;
3549 /* All address computations that can be done are free, but rtx cost returns
3550 the same for practically all of them. So we weight the different types
3551 of address here in the order (most preferred first):
3552 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
3553 static inline int
3554 arm_arm_address_cost (rtx x)
3556 enum rtx_code c = GET_CODE (x);
3558 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3559 return 0;
3560 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3561 return 10;
3563 if (c == PLUS || c == MINUS)
3565 char cl0 = GET_RTX_CLASS (GET_CODE (XEXP (x, 0)));
3566 char cl1 = GET_RTX_CLASS (GET_CODE (XEXP (x, 1)));
3568 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3569 return 2;
3571 if (cl0 == '2' || cl0 == 'c' || cl1 == '2' || cl1 == 'c')
3572 return 3;
3574 return 4;
3577 return 6;
3580 static inline int
3581 arm_thumb_address_cost (rtx x)
3583 enum rtx_code c = GET_CODE (x);
3585 if (c == REG)
3586 return 1;
3587 if (c == PLUS
3588 && GET_CODE (XEXP (x, 0)) == REG
3589 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3590 return 1;
3592 return 2;
3595 static int
3596 arm_address_cost (rtx x)
3598 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
3601 static int
3602 arm_use_dfa_pipeline_interface (void)
3604 return true;
3607 static int
3608 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3610 rtx i_pat, d_pat;
3612 /* Some true dependencies can have a higher cost depending
3613 on precisely how certain input operands are used. */
3614 if (arm_tune_xscale
3615 && REG_NOTE_KIND (link) == 0
3616 && recog_memoized (insn) >= 0
3617 && recog_memoized (dep) >= 0)
3619 int shift_opnum = get_attr_shift (insn);
3620 enum attr_type attr_type = get_attr_type (dep);
3622 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
3623 operand for INSN. If we have a shifted input operand and the
3624 instruction we depend on is another ALU instruction, then we may
3625 have to account for an additional stall. */
3626 if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
3628 rtx shifted_operand;
3629 int opno;
3631 /* Get the shifted operand. */
3632 extract_insn (insn);
3633 shifted_operand = recog_data.operand[shift_opnum];
3635 /* Iterate over all the operands in DEP. If we write an operand
3636 that overlaps with SHIFTED_OPERAND, then we have to increase the
3637 cost of this dependency. */
3638 extract_insn (dep);
3639 preprocess_constraints ();
3640 for (opno = 0; opno < recog_data.n_operands; opno++)
3642 /* We can ignore strict inputs. */
3643 if (recog_data.operand_type[opno] == OP_IN)
3644 continue;
3646 if (reg_overlap_mentioned_p (recog_data.operand[opno],
3647 shifted_operand))
3648 return 2;
3653 /* XXX This is not strictly true for the FPA. */
3654 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
3655 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
3656 return 0;
3658 /* Call insns don't incur a stall, even if they follow a load. */
3659 if (REG_NOTE_KIND (link) == 0
3660 && GET_CODE (insn) == CALL_INSN)
3661 return 1;
3663 if ((i_pat = single_set (insn)) != NULL
3664 && GET_CODE (SET_SRC (i_pat)) == MEM
3665 && (d_pat = single_set (dep)) != NULL
3666 && GET_CODE (SET_DEST (d_pat)) == MEM)
3668 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
3669 /* This is a load after a store; there is no conflict if the load reads
3670 from a cached area. Assume that loads from the stack, and from the
3671 constant pool are cached, and that others will miss. This is a
3672 hack. */
3674 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
3675 || reg_mentioned_p (stack_pointer_rtx, src_mem)
3676 || reg_mentioned_p (frame_pointer_rtx, src_mem)
3677 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
3678 return 1;
3681 return cost;
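/* An illustrative XScale case for the shifted-operand test above:

   mov r1, r2               -- DEP writes r1
   add r0, r3, r1, lsl #2   -- INSN uses r1 as a shifted operand

   The shifted operand is needed a pipeline stage early, so the
   dependency costs 2 rather than the usual 1.  */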
3684 static int fpa_consts_inited = 0;
3686 static const char * const strings_fpa[8] =
3688 "0", "1", "2", "3",
3689 "4", "5", "0.5", "10"
3692 static REAL_VALUE_TYPE values_fpa[8];
3694 static void
3695 init_fpa_table (void)
3697 int i;
3698 REAL_VALUE_TYPE r;
3700 for (i = 0; i < 8; i++)
3702 r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
3703 values_fpa[i] = r;
3706 fpa_consts_inited = 1;
3709 /* Return TRUE if rtx X is a valid immediate FPA constant. */
3711 const_double_rtx_ok_for_fpa (rtx x)
3713 REAL_VALUE_TYPE r;
3714 int i;
3716 if (!fpa_consts_inited)
3717 init_fpa_table ();
3719 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3720 if (REAL_VALUE_MINUS_ZERO (r))
3721 return 0;
3723 for (i = 0; i < 8; i++)
3724 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3725 return 1;
3727 return 0;
3730 /* Return TRUE if rtx X is a valid immediate FPA constant when negated. */
3732 neg_const_double_rtx_ok_for_fpa (rtx x)
3734 REAL_VALUE_TYPE r;
3735 int i;
3737 if (!fpa_consts_inited)
3738 init_fpa_table ();
3740 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3741 r = REAL_VALUE_NEGATE (r);
3742 if (REAL_VALUE_MINUS_ZERO (r))
3743 return 0;
3745 for (i = 0; i < 8; i++)
3746 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
3747 return 1;
3749 return 0;
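/* So, for example, the CONST_DOUBLEs 2.0, 0.5 and 10.0 all satisfy
   const_double_rtx_ok_for_fpa, while 0.3 does not; -1.0 fails the
   plain test but satisfies the negated variant above, since 1.0 is
   in the table.  */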
3752 /* Predicates for `match_operand' and `match_operator'. */
3754 /* s_register_operand is the same as register_operand, but it doesn't accept
3755 (SUBREG (MEM)...).
3757 This function exists because, at the time it was added, it led to better
3758 code. SUBREG(MEM) always needs a reload in the places where
3759 s_register_operand is used, and this seemed to lead to excessive
3760 reloading. */
3762 s_register_operand (rtx op, enum machine_mode mode)
3764 if (GET_MODE (op) != mode && mode != VOIDmode)
3765 return 0;
3767 if (GET_CODE (op) == SUBREG)
3768 op = SUBREG_REG (op);
3770 /* We don't consider registers whose class is NO_REGS
3771 to be a register operand. */
3772 /* XXX might have to check for lo regs only for thumb ??? */
3773 return (GET_CODE (op) == REG
3774 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3775 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3778 /* A hard register operand (even before reload). */
3780 arm_hard_register_operand (rtx op, enum machine_mode mode)
3782 if (GET_MODE (op) != mode && mode != VOIDmode)
3783 return 0;
3785 return (GET_CODE (op) == REG
3786 && REGNO (op) < FIRST_PSEUDO_REGISTER);
3789 /* Only accept reg, subreg(reg), const_int. */
3791 reg_or_int_operand (rtx op, enum machine_mode mode)
3793 if (GET_CODE (op) == CONST_INT)
3794 return 1;
3796 if (GET_MODE (op) != mode && mode != VOIDmode)
3797 return 0;
3799 if (GET_CODE (op) == SUBREG)
3800 op = SUBREG_REG (op);
3802 /* We don't consider registers whose class is NO_REGS
3803 to be a register operand. */
3804 return (GET_CODE (op) == REG
3805 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3806 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
3809 /* Return 1 if OP is an item in memory, given that we are in reload. */
3811 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3813 int regno = true_regnum (op);
3815 return (!CONSTANT_P (op)
3816 && (regno == -1
3817 || (GET_CODE (op) == REG
3818 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
3821 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
3822 memory access (architecture V4).
3823 MODE is QImode if called when computing constraints, or VOIDmode when
3824 emitting patterns. In this latter case we cannot use memory_operand()
3825 because it will fail on badly formed MEMs, which is precisely what we are
3826 trying to catch. */
3828 bad_signed_byte_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3830 if (GET_CODE (op) != MEM)
3831 return 0;
3833 op = XEXP (op, 0);
3835 /* A sum of anything more complex than reg + reg or reg + const is bad. */
3836 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3837 && (!s_register_operand (XEXP (op, 0), VOIDmode)
3838 || (!s_register_operand (XEXP (op, 1), VOIDmode)
3839 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
3840 return 1;
3842 /* Big constants are also bad. */
3843 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
3844 && (INTVAL (XEXP (op, 1)) > 0xff
3845 || -INTVAL (XEXP (op, 1)) > 0xff))
3846 return 1;
3848 /* Everything else is good, or will automatically be made so. */
3849 return 0;
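/* For illustration: (mem (plus (reg) (const_int 256))) is flagged as
   bad above, since the architecture V4 signed byte loads only have
   an 8-bit immediate offset; (mem (plus (reg) (const_int 255))) is
   fine.  */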
3852 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
3854 arm_rhs_operand (rtx op, enum machine_mode mode)
3856 return (s_register_operand (op, mode)
3857 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
3860 /* Return TRUE for valid operands for the
3861 rhs of an ARM instruction, or a load. */
3863 arm_rhsm_operand (rtx op, enum machine_mode mode)
3865 return (s_register_operand (op, mode)
3866 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
3867 || memory_operand (op, mode));
3870 /* Return TRUE for valid operands for the rhs of an ARM instruction, or for
3871 a constant that is valid when negated. */
3873 arm_add_operand (rtx op, enum machine_mode mode)
3875 if (TARGET_THUMB)
3876 return thumb_cmp_operand (op, mode);
3878 return (s_register_operand (op, mode)
3879 || (GET_CODE (op) == CONST_INT
3880 && (const_ok_for_arm (INTVAL (op))
3881 || const_ok_for_arm (-INTVAL (op)))));
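/* For illustration: an add of -1 is accepted by arm_add_operand even
   though 0xffffffff is not a valid ARM immediate, because the negated
   value 1 is; the insn can then be output as a sub with #1.  */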
3884 /* Return TRUE for valid ARM constants (or when valid if negated). */
3886 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3888 return (GET_CODE (op) == CONST_INT
3889 && (const_ok_for_arm (INTVAL (op))
3890 || const_ok_for_arm (-INTVAL (op))));
3894 arm_not_operand (rtx op, enum machine_mode mode)
3896 return (s_register_operand (op, mode)
3897 || (GET_CODE (op) == CONST_INT
3898 && (const_ok_for_arm (INTVAL (op))
3899 || const_ok_for_arm (~INTVAL (op)))));
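/* For illustration: 0xffffff00 is not a valid ARM immediate, but its
   bitwise complement 0xff is, so arm_not_operand accepts it and the
   output routines can fall back on the mvn/bic forms.  */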
3902 /* Return TRUE if the operand is a memory reference which contains an
3903 offsettable address. */
3905 offsettable_memory_operand (rtx op, enum machine_mode mode)
3907 if (mode == VOIDmode)
3908 mode = GET_MODE (op);
3910 return (mode == GET_MODE (op)
3911 && GET_CODE (op) == MEM
3912 && offsettable_address_p (reload_completed | reload_in_progress,
3913 mode, XEXP (op, 0)));
3916 /* Return TRUE if the operand is a memory reference which is, or can be
3917 made word aligned by adjusting the offset. */
3919 alignable_memory_operand (rtx op, enum machine_mode mode)
3921 rtx reg;
3923 if (mode == VOIDmode)
3924 mode = GET_MODE (op);
3926 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
3927 return 0;
3929 op = XEXP (op, 0);
3931 return ((GET_CODE (reg = op) == REG
3932 || (GET_CODE (op) == SUBREG
3933 && GET_CODE (reg = SUBREG_REG (op)) == REG)
3934 || (GET_CODE (op) == PLUS
3935 && GET_CODE (XEXP (op, 1)) == CONST_INT
3936 && (GET_CODE (reg = XEXP (op, 0)) == REG
3937 || (GET_CODE (XEXP (op, 0)) == SUBREG
3938 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
3939 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
3942 /* Similar to s_register_operand, but does not allow hard integer
3943 registers. */
3945 f_register_operand (rtx op, enum machine_mode mode)
3947 if (GET_MODE (op) != mode && mode != VOIDmode)
3948 return 0;
3950 if (GET_CODE (op) == SUBREG)
3951 op = SUBREG_REG (op);
3953 /* We don't consider registers whose class is NO_REGS
3954 to be a register operand. */
3955 return (GET_CODE (op) == REG
3956 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
3957 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
3960 /* Return TRUE for valid operands for the rhs of an FPA instruction. */
3962 fpa_rhs_operand (rtx op, enum machine_mode mode)
3964 if (s_register_operand (op, mode))
3965 return TRUE;
3967 if (GET_MODE (op) != mode && mode != VOIDmode)
3968 return FALSE;
3970 if (GET_CODE (op) == CONST_DOUBLE)
3971 return const_double_rtx_ok_for_fpa (op);
3973 return FALSE;
3977 fpa_add_operand (rtx op, enum machine_mode mode)
3979 if (s_register_operand (op, mode))
3980 return TRUE;
3982 if (GET_MODE (op) != mode && mode != VOIDmode)
3983 return FALSE;
3985 if (GET_CODE (op) == CONST_DOUBLE)
3986 return (const_double_rtx_ok_for_fpa (op)
3987 || neg_const_double_rtx_ok_for_fpa (op));
3989 return FALSE;
3992 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
3994 cirrus_memory_offset (rtx op)
3996 /* Reject eliminable registers. */
3997 if (! (reload_in_progress || reload_completed)
3998 && ( reg_mentioned_p (frame_pointer_rtx, op)
3999 || reg_mentioned_p (arg_pointer_rtx, op)
4000 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4001 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4002 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4003 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4004 return 0;
4006 if (GET_CODE (op) == MEM)
4008 rtx ind;
4010 ind = XEXP (op, 0);
4012 /* Match: (mem (reg)). */
4013 if (GET_CODE (ind) == REG)
4014 return 1;
4016 /* Match:
4017 (mem (plus (reg)
4018 (const))). */
4019 if (GET_CODE (ind) == PLUS
4020 && GET_CODE (XEXP (ind, 0)) == REG
4021 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4022 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4023 return 1;
4026 return 0;
4029 /* Return nonzero if OP is a Cirrus or general register. */
4031 cirrus_register_operand (rtx op, enum machine_mode mode)
4033 if (GET_MODE (op) != mode && mode != VOIDmode)
4034 return FALSE;
4036 if (GET_CODE (op) == SUBREG)
4037 op = SUBREG_REG (op);
4039 return (GET_CODE (op) == REG
4040 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4041 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4044 /* Return nonzero if OP is a Cirrus FP register. */
4046 cirrus_fp_register (rtx op, enum machine_mode mode)
4048 if (GET_MODE (op) != mode && mode != VOIDmode)
4049 return FALSE;
4051 if (GET_CODE (op) == SUBREG)
4052 op = SUBREG_REG (op);
4054 return (GET_CODE (op) == REG
4055 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4056 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4059 /* Return nonzero if OP is a 6-bit constant (0..63). */
4061 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4063 return (GET_CODE (op) == CONST_INT
4064 && INTVAL (op) >= 0
4065 && INTVAL (op) < 64);
4068 /* Return TRUE if INSN is an "LDR REG, ADDR" instruction.
4069 Used by the Cirrus Maverick code, which has to work around
4070 a hardware bug triggered by such instructions. */
4071 static bool
4072 arm_memory_load_p (rtx insn)
4074 rtx body, lhs, rhs;
4076 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4077 return false;
4079 body = PATTERN (insn);
4081 if (GET_CODE (body) != SET)
4082 return false;
4084 lhs = XEXP (body, 0);
4085 rhs = XEXP (body, 1);
4087 lhs = REG_OR_SUBREG_RTX (lhs);
4089 /* If the destination is not a general purpose
4090 register we do not have to worry. */
4091 if (GET_CODE (lhs) != REG
4092 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4093 return false;
4095 /* As well as loads from memory we also have to react
4096 to loads of invalid constants which will be turned
4097 into loads from the minipool. */
4098 return (GET_CODE (rhs) == MEM
4099 || GET_CODE (rhs) == SYMBOL_REF
4100 || note_invalid_constants (insn, -1, false));
4103 /* Return TRUE if INSN is a Cirrus instruction. */
4104 static bool
4105 arm_cirrus_insn_p (rtx insn)
4107 enum attr_cirrus attr;
4109 /* get_attr aborts on USE and CLOBBER. */
4110 if (!insn
4111 || GET_CODE (insn) != INSN
4112 || GET_CODE (PATTERN (insn)) == USE
4113 || GET_CODE (PATTERN (insn)) == CLOBBER)
4114 return 0;
4116 attr = get_attr_cirrus (insn);
4118 return attr != CIRRUS_NOT;
4121 /* Cirrus reorg for invalid instruction combinations. */
4122 static void
4123 cirrus_reorg (rtx first)
4125 enum attr_cirrus attr;
4126 rtx body = PATTERN (first);
4127 rtx t;
4128 int nops;
4130 /* Any branch must be followed by 2 non-Cirrus instructions. */
4131 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4133 nops = 0;
4134 t = next_nonnote_insn (first);
4136 if (arm_cirrus_insn_p (t))
4137 ++ nops;
4139 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4140 ++ nops;
4142 while (nops --)
4143 emit_insn_after (gen_nop (), first);
4145 return;
4148 /* (float (blah)) is in parallel with a clobber. */
4149 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4150 body = XVECEXP (body, 0, 0);
4152 if (GET_CODE (body) == SET)
4154 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4156 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4157 be followed by a non-Cirrus insn. */
4158 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4160 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4161 emit_insn_after (gen_nop (), first);
4163 return;
4165 else if (arm_memory_load_p (first))
4167 unsigned int arm_regno;
4169 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4170 ldr/cfmv64hr combination where the Rd field is the same
4171 in both instructions must be split with a non Cirrus
4172 insn. Example:
4174 ldr r0, blah
4176 cfmvsr mvf0, r0. */
4178 /* Get Arm register number for ldr insn. */
4179 if (GET_CODE (lhs) == REG)
4180 arm_regno = REGNO (lhs);
4181 else if (GET_CODE (rhs) == REG)
4182 arm_regno = REGNO (rhs);
4183 else
4184 abort ();
4186 /* Next insn. */
4187 first = next_nonnote_insn (first);
4189 if (! arm_cirrus_insn_p (first))
4190 return;
4192 body = PATTERN (first);
4194 /* (float (blah)) is in parallel with a clobber. */
4195 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4196 body = XVECEXP (body, 0, 0);
4198 if (GET_CODE (body) == FLOAT)
4199 body = XEXP (body, 0);
4201 if (get_attr_cirrus (first) == CIRRUS_MOVE
4202 && GET_CODE (XEXP (body, 1)) == REG
4203 && arm_regno == REGNO (XEXP (body, 1)))
4204 emit_insn_after (gen_nop (), first);
4206 return;
4210 /* get_attr aborts on USE and CLOBBER. */
4211 if (!first
4212 || GET_CODE (first) != INSN
4213 || GET_CODE (PATTERN (first)) == USE
4214 || GET_CODE (PATTERN (first)) == CLOBBER)
4215 return;
4217 attr = get_attr_cirrus (first);
4219 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4220 must be followed by a non-coprocessor instruction. */
4221 if (attr == CIRRUS_COMPARE)
4223 nops = 0;
4225 t = next_nonnote_insn (first);
4227 if (arm_cirrus_insn_p (t))
4228 ++ nops;
4230 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4231 ++ nops;
4233 while (nops --)
4234 emit_insn_after (gen_nop (), first);
4236 return;
4240 /* Return nonzero if OP is a constant power of two. */
4242 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4244 if (GET_CODE (op) == CONST_INT)
4246 HOST_WIDE_INT value = INTVAL (op);
4248 return value != 0 && (value & (value - 1)) == 0;
4251 return FALSE;
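/* For illustration of the test above: 8 is 1000 in binary and
   8 & 7 == 0, so it is accepted; 12 is 1100 and 12 & 11 == 8, so it
   is rejected.  */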
4254 /* Return TRUE for a valid operand of a DImode operation.
4255 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4256 Note that this disallows MEM(REG+REG), but allows
4257 MEM(PRE/POST_INC/DEC(REG)). */
4259 di_operand (rtx op, enum machine_mode mode)
4261 if (s_register_operand (op, mode))
4262 return TRUE;
4264 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4265 return FALSE;
4267 if (GET_CODE (op) == SUBREG)
4268 op = SUBREG_REG (op);
4270 switch (GET_CODE (op))
4272 case CONST_DOUBLE:
4273 case CONST_INT:
4274 return TRUE;
4276 case MEM:
4277 return memory_address_p (DImode, XEXP (op, 0));
4279 default:
4280 return FALSE;
4284 /* Like di_operand, but don't accept constants. */
4286 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4288 if (s_register_operand (op, mode))
4289 return TRUE;
4291 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4292 return FALSE;
4294 if (GET_CODE (op) == SUBREG)
4295 op = SUBREG_REG (op);
4297 if (GET_CODE (op) == MEM)
4298 return memory_address_p (DImode, XEXP (op, 0));
4300 return FALSE;
4303 /* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
5304 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
4305 Note that this disallows MEM(REG+REG), but allows
4306 MEM(PRE/POST_INC/DEC(REG)). */
4308 soft_df_operand (rtx op, enum machine_mode mode)
4310 if (s_register_operand (op, mode))
4311 return TRUE;
4313 if (mode != VOIDmode && GET_MODE (op) != mode)
4314 return FALSE;
4316 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4317 return FALSE;
4319 if (GET_CODE (op) == SUBREG)
4320 op = SUBREG_REG (op);
4322 switch (GET_CODE (op))
4324 case CONST_DOUBLE:
4325 return TRUE;
4327 case MEM:
4328 return memory_address_p (DFmode, XEXP (op, 0));
4330 default:
4331 return FALSE;
4335 /* Like soft_df_operand, but don't accept constants. */
4337 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4339 if (s_register_operand (op, mode))
4340 return TRUE;
4342 if (mode != VOIDmode && GET_MODE (op) != mode)
4343 return FALSE;
4345 if (GET_CODE (op) == SUBREG)
4346 op = SUBREG_REG (op);
4348 if (GET_CODE (op) == MEM)
4349 return memory_address_p (DFmode, XEXP (op, 0));
4350 return FALSE;
4353 /* Return TRUE for valid index operands. */
4355 index_operand (rtx op, enum machine_mode mode)
4357 return (s_register_operand (op, mode)
4358 || (immediate_operand (op, mode)
4359 && (GET_CODE (op) != CONST_INT
4360 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
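/* For illustration: #4095 is the largest immediate index accepted
   above, matching the 12-bit offset field of the word and unsigned
   byte load/store instructions; #4096 has to be moved into a
   register first.  */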
4363 /* Return TRUE for valid shifts by a constant. This also accepts any
4364 power of two on the (somewhat overly relaxed) assumption that the
4365 shift operator in this case was a mult. */
4367 const_shift_operand (rtx op, enum machine_mode mode)
4369 return (power_of_two_operand (op, mode)
4370 || (immediate_operand (op, mode)
4371 && (GET_CODE (op) != CONST_INT
4372 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4375 /* Return TRUE for arithmetic operators which can be combined with a multiply
4376 (shift). */
4378 shiftable_operator (rtx x, enum machine_mode mode)
4380 enum rtx_code code;
4382 if (GET_MODE (x) != mode)
4383 return FALSE;
4385 code = GET_CODE (x);
4387 return (code == PLUS || code == MINUS
4388 || code == IOR || code == XOR || code == AND);
4391 /* Return TRUE for binary logical operators. */
4393 logical_binary_operator (rtx x, enum machine_mode mode)
4395 enum rtx_code code;
4397 if (GET_MODE (x) != mode)
4398 return FALSE;
4400 code = GET_CODE (x);
4402 return (code == IOR || code == XOR || code == AND);
4405 /* Return TRUE for shift operators. */
4407 shift_operator (rtx x,enum machine_mode mode)
4409 enum rtx_code code;
4411 if (GET_MODE (x) != mode)
4412 return FALSE;
4414 code = GET_CODE (x);
4416 if (code == MULT)
4417 return power_of_two_operand (XEXP (x, 1), mode);
4419 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4420 || code == ROTATERT);
4423 /* Return TRUE if x is EQ or NE. */
4425 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4427 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4430 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4432 arm_comparison_operator (rtx x, enum machine_mode mode)
4434 return (comparison_operator (x, mode)
4435 && GET_CODE (x) != LTGT
4436 && GET_CODE (x) != UNEQ);
4439 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4441 minmax_operator (rtx x, enum machine_mode mode)
4443 enum rtx_code code = GET_CODE (x);
4445 if (GET_MODE (x) != mode)
4446 return FALSE;
4448 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4451 /* Return TRUE if this is the condition code register; if we aren't given
4452 a mode, accept any CCmode-class register. */
4454 cc_register (rtx x, enum machine_mode mode)
4456 if (mode == VOIDmode)
4458 mode = GET_MODE (x);
4460 if (GET_MODE_CLASS (mode) != MODE_CC)
4461 return FALSE;
4464 if ( GET_MODE (x) == mode
4465 && GET_CODE (x) == REG
4466 && REGNO (x) == CC_REGNUM)
4467 return TRUE;
4469 return FALSE;
4472 /* Return TRUE if this is the condition code register; if we aren't given
4473 a mode, accept any CCmode-class register which indicates a dominance
4474 expression. */
4476 dominant_cc_register (rtx x, enum machine_mode mode)
4478 if (mode == VOIDmode)
4480 mode = GET_MODE (x);
4482 if (GET_MODE_CLASS (mode) != MODE_CC)
4483 return FALSE;
4486 if (mode != CC_DNEmode && mode != CC_DEQmode
4487 && mode != CC_DLEmode && mode != CC_DLTmode
4488 && mode != CC_DGEmode && mode != CC_DGTmode
4489 && mode != CC_DLEUmode && mode != CC_DLTUmode
4490 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4491 return FALSE;
4493 return cc_register (x, mode);
4496 /* Return TRUE if X references a SYMBOL_REF. */
4498 symbol_mentioned_p (rtx x)
4500 const char * fmt;
4501 int i;
4503 if (GET_CODE (x) == SYMBOL_REF)
4504 return 1;
4506 fmt = GET_RTX_FORMAT (GET_CODE (x));
4508 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4510 if (fmt[i] == 'E')
4512 int j;
4514 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4515 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4516 return 1;
4518 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4519 return 1;
4522 return 0;
4525 /* Return TRUE if X references a LABEL_REF. */
4527 label_mentioned_p (rtx x)
4529 const char * fmt;
4530 int i;
4532 if (GET_CODE (x) == LABEL_REF)
4533 return 1;
4535 fmt = GET_RTX_FORMAT (GET_CODE (x));
4536 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4538 if (fmt[i] == 'E')
4540 int j;
4542 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4543 if (label_mentioned_p (XVECEXP (x, i, j)))
4544 return 1;
4546 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
4547 return 1;
4550 return 0;
4553 enum rtx_code
4554 minmax_code (rtx x)
4556 enum rtx_code code = GET_CODE (x);
4558 if (code == SMAX)
4559 return GE;
4560 else if (code == SMIN)
4561 return LE;
4562 else if (code == UMIN)
4563 return LEU;
4564 else if (code == UMAX)
4565 return GEU;
4567 abort ();
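/* For illustration: SMAX maps to GE above because (smax a b) selects
   its first operand exactly when a >= b holds, so after a "cmp a, b"
   the GE condition picks the correct value (e.g. via a conditional
   move).  */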
4570 /* Return 1 if memory locations are adjacent. */
4572 adjacent_mem_locations (rtx a, rtx b)
4574 if ((GET_CODE (XEXP (a, 0)) == REG
4575 || (GET_CODE (XEXP (a, 0)) == PLUS
4576 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
4577 && (GET_CODE (XEXP (b, 0)) == REG
4578 || (GET_CODE (XEXP (b, 0)) == PLUS
4579 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
4581 int val0 = 0, val1 = 0;
4582 int reg0, reg1;
4584 if (GET_CODE (XEXP (a, 0)) == PLUS)
4586 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
4587 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
4589 else
4590 reg0 = REGNO (XEXP (a, 0));
4592 if (GET_CODE (XEXP (b, 0)) == PLUS)
4594 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
4595 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
4597 else
4598 reg1 = REGNO (XEXP (b, 0));
4600 /* Don't accept any offset that will require multiple
4601 instructions to handle, since this would cause the
4602 arith_adjacentmem pattern to output an overlong sequence. */
4603 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
4604 return 0;
4606 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
4608 return 0;
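/* For illustration: (mem (plus (reg 3) (const_int 4))) and
   (mem (plus (reg 3) (const_int 8))) are adjacent by the test above,
   and so are candidates for the arith_adjacentmem pattern.  */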
4611 /* Return 1 if OP is a load multiple operation. It is known to be a
4612 PARALLEL, and the first section will be tested. */
4614 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4616 HOST_WIDE_INT count = XVECLEN (op, 0);
4617 int dest_regno;
4618 rtx src_addr;
4619 HOST_WIDE_INT i = 1, base = 0;
4620 rtx elt;
4622 if (count <= 1
4623 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4624 return 0;
4626 /* Check to see if this might be a write-back. */
4627 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4629 i++;
4630 base = 1;
4632 /* Now check it more carefully. */
4633 if (GET_CODE (SET_DEST (elt)) != REG
4634 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4635 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4636 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4637 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4638 return 0;
4641 /* Perform a quick check so we don't blow up below. */
4642 if (count <= i
4643 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4644 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
4645 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
4646 return 0;
4648 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
4649 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
4651 for (; i < count; i++)
4653 elt = XVECEXP (op, 0, i);
4655 if (GET_CODE (elt) != SET
4656 || GET_CODE (SET_DEST (elt)) != REG
4657 || GET_MODE (SET_DEST (elt)) != SImode
4658 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
4659 || GET_CODE (SET_SRC (elt)) != MEM
4660 || GET_MODE (SET_SRC (elt)) != SImode
4661 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
4662 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
4663 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
4664 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
4665 return 0;
4668 return 1;
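/* For illustration, a two-register load multiple without write-back
   that passes the test above looks like:

     (parallel [(set (reg:SI 0) (mem:SI (reg:SI 4)))
                (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 4)
                                                 (const_int 4))))])  */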
4671 /* Return 1 if OP is a store multiple operation. It is known to be a
4672 PARALLEL, and the first section will be tested. */
4674 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4676 HOST_WIDE_INT count = XVECLEN (op, 0);
4677 int src_regno;
4678 rtx dest_addr;
4679 HOST_WIDE_INT i = 1, base = 0;
4680 rtx elt;
4682 if (count <= 1
4683 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
4684 return 0;
4686 /* Check to see if this might be a write-back. */
4687 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
4689 i++;
4690 base = 1;
4692 /* Now check it more carefully. */
4693 if (GET_CODE (SET_DEST (elt)) != REG
4694 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
4695 || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
4696 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
4697 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
4698 return 0;
4701 /* Perform a quick check so we don't blow up below. */
4702 if (count <= i
4703 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
4704 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
4705 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
4706 return 0;
4708 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
4709 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
4711 for (; i < count; i++)
4713 elt = XVECEXP (op, 0, i);
4715 if (GET_CODE (elt) != SET
4716 || GET_CODE (SET_SRC (elt)) != REG
4717 || GET_MODE (SET_SRC (elt)) != SImode
4718 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
4719 || GET_CODE (SET_DEST (elt)) != MEM
4720 || GET_MODE (SET_DEST (elt)) != SImode
4721 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
4722 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
4723 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
4724 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
4725 return 0;
4728 return 1;
4732 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4733 HOST_WIDE_INT *load_offset)
4735 int unsorted_regs[4];
4736 HOST_WIDE_INT unsorted_offsets[4];
4737 int order[4];
4738 int base_reg = -1;
4739 int i;
4741 /* Can only handle 2, 3, or 4 insns at present,
4742 though could be easily extended if required. */
4743 if (nops < 2 || nops > 4)
4744 abort ();
4746 /* Loop over the operands and check that the memory references are
4747 suitable (i.e. immediate offsets from the same base register). At
4748 the same time, extract the target register, and the memory
4749 offsets. */
4750 for (i = 0; i < nops; i++)
4752 rtx reg;
4753 rtx offset;
4755 /* Convert a subreg of a mem into the mem itself. */
4756 if (GET_CODE (operands[nops + i]) == SUBREG)
4757 operands[nops + i] = alter_subreg (operands + (nops + i));
4759 if (GET_CODE (operands[nops + i]) != MEM)
4760 abort ();
4762 /* Don't reorder volatile memory references; it doesn't seem worth
4763 looking for the case where the order is ok anyway. */
4764 if (MEM_VOLATILE_P (operands[nops + i]))
4765 return 0;
4767 offset = const0_rtx;
4769 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4770 || (GET_CODE (reg) == SUBREG
4771 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4772 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
4773 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
4774 == REG)
4775 || (GET_CODE (reg) == SUBREG
4776 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
4777 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
4778 == CONST_INT)))
4780 if (i == 0)
4782 base_reg = REGNO (reg);
4783 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
4784 ? REGNO (operands[i])
4785 : REGNO (SUBREG_REG (operands[i])));
4786 order[0] = 0;
4788 else
4790 if (base_reg != (int) REGNO (reg))
4791 /* Not addressed from the same base register. */
4792 return 0;
4794 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
4795 ? REGNO (operands[i])
4796 : REGNO (SUBREG_REG (operands[i])));
4797 if (unsorted_regs[i] < unsorted_regs[order[0]])
4798 order[0] = i;
4801 /* If it isn't an integer register, or if it overwrites the
4802 base register but isn't the last insn in the list, then
4803 we can't do this. */
4804 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
4805 || (i != nops - 1 && unsorted_regs[i] == base_reg))
4806 return 0;
4808 unsorted_offsets[i] = INTVAL (offset);
4810 else
4811 /* Not a suitable memory address. */
4812 return 0;
4815 /* All the useful information has now been extracted from the
4816 operands into unsorted_regs and unsorted_offsets; additionally,
4817 order[0] has been set to the lowest numbered register in the
4818 list. Sort the registers into order, and check that the memory
4819 offsets are ascending and adjacent. */
4821 for (i = 1; i < nops; i++)
4823 int j;
4825 order[i] = order[i - 1];
4826 for (j = 0; j < nops; j++)
4827 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
4828 && (order[i] == order[i - 1]
4829 || unsorted_regs[j] < unsorted_regs[order[i]]))
4830 order[i] = j;
4832 /* Have we found a suitable register? If not, one must be used more
4833 than once. */
4834 if (order[i] == order[i - 1])
4835 return 0;
4837 /* Are the memory offsets adjacent and ascending? */
4838 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
4839 return 0;
4842 if (base)
4844 *base = base_reg;
4846 for (i = 0; i < nops; i++)
4847 regs[i] = unsorted_regs[order[i]];
4849 *load_offset = unsorted_offsets[order[0]];
4852 if (unsorted_offsets[order[0]] == 0)
4853 return 1; /* ldmia */
4855 if (unsorted_offsets[order[0]] == 4)
4856 return 2; /* ldmib */
4858 if (unsorted_offsets[order[nops - 1]] == 0)
4859 return 3; /* ldmda */
4861 if (unsorted_offsets[order[nops - 1]] == -4)
4862 return 4; /* ldmdb */
4864 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
4865 if the offset isn't small enough. The reason 2 ldrs are faster
4866 is because these ARMs are able to do more than one cache access
4867 in a single cycle. The ARM9 and StrongARM have Harvard caches,
4868 whilst the ARM8 has a double bandwidth cache. This means that
4869 these cores can do both an instruction fetch and a data fetch in
4870 a single cycle, so the trick of calculating the address into a
4871 scratch register (one of the result regs) and then doing a load
4872 multiple actually becomes slower (and no smaller in code size).
4873 That is the transformation
4875 ldr rd1, [rbase + offset]
4876 ldr rd2, [rbase + offset + 4]
to
4880 add rd1, rbase, offset
4881 ldmia rd1, {rd1, rd2}
4883 produces worse code -- '3 cycles + any stalls on rd2' instead of
4884 '2 cycles + any stalls on rd2'. On ARMs with only one cache
4885 access per cycle, the first sequence could never complete in less
4886 than 6 cycles, whereas the ldm sequence would only take 5 and
4887 would make better use of sequential accesses if not hitting the
4888 cache.
4890 We cheat here and test 'arm_ld_sched' which we currently know to
4891 only be true for the ARM8, ARM9 and StrongARM. If this ever
4892 changes, then the test below needs to be reworked. */
4893 if (nops == 2 && arm_ld_sched)
4894 return 0;
4896 /* Can't do it without setting up the offset; only do this if it takes
4897 no more than one insn. */
4898 return (const_ok_for_arm (unsorted_offsets[order[0]])
4899 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
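/* For illustration: loads of r0 from [r3, #4] and r1 from [r3, #8]
   have adjacent, ascending offsets that start at 4 rather than 0, so
   the code above returns 2 and emit_ldm_seq below produces
   "ldmib r3, {r0, r1}".  */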
4902 const char *
4903 emit_ldm_seq (rtx *operands, int nops)
4905 int regs[4];
4906 int base_reg;
4907 HOST_WIDE_INT offset;
4908 char buf[100];
4909 int i;
4911 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
4913 case 1:
4914 strcpy (buf, "ldm%?ia\t");
4915 break;
4917 case 2:
4918 strcpy (buf, "ldm%?ib\t");
4919 break;
4921 case 3:
4922 strcpy (buf, "ldm%?da\t");
4923 break;
4925 case 4:
4926 strcpy (buf, "ldm%?db\t");
4927 break;
4929 case 5:
4930 if (offset >= 0)
4931 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4932 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4933 (long) offset);
4934 else
4935 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
4936 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
4937 (long) -offset);
4938 output_asm_insn (buf, operands);
4939 base_reg = regs[0];
4940 strcpy (buf, "ldm%?ia\t");
4941 break;
4943 default:
4944 abort ();
4947 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
4948 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
4950 for (i = 1; i < nops; i++)
4951 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
4952 reg_names[regs[i]]);
4954 strcat (buf, "}\t%@ phole ldm");
4956 output_asm_insn (buf, operands);
4957 return "";
4961 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
4962 HOST_WIDE_INT * load_offset)
4964 int unsorted_regs[4];
4965 HOST_WIDE_INT unsorted_offsets[4];
4966 int order[4];
4967 int base_reg = -1;
4968 int i;
4970 /* Can only handle 2, 3, or 4 insns at present, though could be easily
4971 extended if required. */
4972 if (nops < 2 || nops > 4)
4973 abort ();
4975 /* Loop over the operands and check that the memory references are
4976 suitable (i.e. immediate offsets from the same base register). At
4977 the same time, extract the target register, and the memory
4978 offsets. */
4979 for (i = 0; i < nops; i++)
4981 rtx reg;
4982 rtx offset;
4984 /* Convert a subreg of a mem into the mem itself. */
4985 if (GET_CODE (operands[nops + i]) == SUBREG)
4986 operands[nops + i] = alter_subreg (operands + (nops + i));
4988 if (GET_CODE (operands[nops + i]) != MEM)
4989 abort ();
4991 /* Don't reorder volatile memory references; it doesn't seem worth
4992 looking for the case where the order is ok anyway. */
4993 if (MEM_VOLATILE_P (operands[nops + i]))
4994 return 0;
4996 offset = const0_rtx;
4998 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
4999 || (GET_CODE (reg) == SUBREG
5000 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5001 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5002 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5003 == REG)
5004 || (GET_CODE (reg) == SUBREG
5005 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5006 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5007 == CONST_INT)))
5009 if (i == 0)
5011 base_reg = REGNO (reg);
5012 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5013 ? REGNO (operands[i])
5014 : REGNO (SUBREG_REG (operands[i])));
5015 order[0] = 0;
5017 else
5019 if (base_reg != (int) REGNO (reg))
5020 /* Not addressed from the same base register. */
5021 return 0;
5023 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5024 ? REGNO (operands[i])
5025 : REGNO (SUBREG_REG (operands[i])));
5026 if (unsorted_regs[i] < unsorted_regs[order[0]])
5027 order[0] = i;
5030 /* If it isn't an integer register, then we can't do this. */
5031 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5032 return 0;
5034 unsorted_offsets[i] = INTVAL (offset);
5036 else
5037 /* Not a suitable memory address. */
5038 return 0;
5041 /* All the useful information has now been extracted from the
5042 operands into unsorted_regs and unsorted_offsets; additionally,
5043 order[0] has been set to the lowest numbered register in the
5044 list. Sort the registers into order, and check that the memory
5045 offsets are ascending and adjacent. */
5047 for (i = 1; i < nops; i++)
5049 int j;
5051 order[i] = order[i - 1];
5052 for (j = 0; j < nops; j++)
5053 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5054 && (order[i] == order[i - 1]
5055 || unsorted_regs[j] < unsorted_regs[order[i]]))
5056 order[i] = j;
5058 /* Have we found a suitable register? If not, one must be used more
5059 than once. */
5060 if (order[i] == order[i - 1])
5061 return 0;
5064 /* Are the memory offsets adjacent and ascending? */
5064 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5065 return 0;
5068 if (base)
5070 *base = base_reg;
5072 for (i = 0; i < nops; i++)
5073 regs[i] = unsorted_regs[order[i]];
5075 *load_offset = unsorted_offsets[order[0]];
5078 if (unsorted_offsets[order[0]] == 0)
5079 return 1; /* stmia */
5081 if (unsorted_offsets[order[0]] == 4)
5082 return 2; /* stmib */
5084 if (unsorted_offsets[order[nops - 1]] == 0)
5085 return 3; /* stmda */
5087 if (unsorted_offsets[order[nops - 1]] == -4)
5088 return 4; /* stmdb */
5090 return 0;
5093 const char *
5094 emit_stm_seq (rtx *operands, int nops)
5096 int regs[4];
5097 int base_reg;
5098 HOST_WIDE_INT offset;
5099 char buf[100];
5100 int i;
5102 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5104 case 1:
5105 strcpy (buf, "stm%?ia\t");
5106 break;
5108 case 2:
5109 strcpy (buf, "stm%?ib\t");
5110 break;
5112 case 3:
5113 strcpy (buf, "stm%?da\t");
5114 break;
5116 case 4:
5117 strcpy (buf, "stm%?db\t");
5118 break;
5120 default:
5121 abort ();
5124 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5125 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5127 for (i = 1; i < nops; i++)
5128 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5129 reg_names[regs[i]]);
5131 strcat (buf, "}\t%@ phole stm");
5133 output_asm_insn (buf, operands);
5134 return "";
5138 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5140 if (GET_CODE (op) != PARALLEL
5141 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5142 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5143 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5144 return 0;
5146 return 1;
5149 /* Routines for use in generating RTL. */
5152 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5153 int write_back, int unchanging_p, int in_struct_p,
5154 int scalar_p)
5156 int i = 0, j;
5157 rtx result;
5158 int sign = up ? 1 : -1;
5159 rtx mem;
5161 /* XScale has load-store double instructions, but they have stricter
5162 alignment requirements than load-store multiple, so we cannot
5163 use them.
5165 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5166 the pipeline until completion.
5168 NREGS CYCLES
1 3
2 4
3 5
4 6
5174 An ldr instruction takes 1-3 cycles, but does not block the
5175 pipeline.
5177 NREGS CYCLES
5178 1 1-3
5179 2 2-6
5180 3 3-9
5181 4 4-12
5183 Best case ldr will always win. However, the more ldr instructions
5184 we issue, the less likely we are to be able to schedule them well.
5185 Using ldr instructions also increases code size.
5187 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5188 for counts of 3 or 4 regs. */
5189 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5191 rtx seq;
5193 start_sequence ();
5195 for (i = 0; i < count; i++)
5197 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5198 RTX_UNCHANGING_P (mem) = unchanging_p;
5199 MEM_IN_STRUCT_P (mem) = in_struct_p;
5200 MEM_SCALAR_P (mem) = scalar_p;
5201 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5204 if (write_back)
5205 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5207 seq = get_insns ();
5208 end_sequence ();
5210 return seq;
5213 result = gen_rtx_PARALLEL (VOIDmode,
5214 rtvec_alloc (count + (write_back ? 1 : 0)));
5215 if (write_back)
5217 XVECEXP (result, 0, 0)
5218 = gen_rtx_SET (GET_MODE (from), from,
5219 plus_constant (from, count * 4 * sign));
5220 i = 1;
5221 count++;
5224 for (j = 0; i < count; i++, j++)
5226 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5227 RTX_UNCHANGING_P (mem) = unchanging_p;
5228 MEM_IN_STRUCT_P (mem) = in_struct_p;
5229 MEM_SCALAR_P (mem) = scalar_p;
5230 XVECEXP (result, 0, i)
5231 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5234 return result;
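/* For illustration: base_regno 0, count 2, counting up with
   write-back yields

     (parallel [(set (reg from) (plus (reg from) (const_int 8)))
                (set (reg:SI 0) (mem:SI (reg from)))
                (set (reg:SI 1) (mem:SI (plus (reg from)
                                              (const_int 4))))])

   which load_multiple_operation above accepts.  */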
5238 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5239 int write_back, int unchanging_p, int in_struct_p,
5240 int scalar_p)
5242 int i = 0, j;
5243 rtx result;
5244 int sign = up ? 1 : -1;
5245 rtx mem;
5247 /* See arm_gen_load_multiple for discussion of
5248 the pros/cons of ldm/stm usage for XScale. */
5249 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5251 rtx seq;
5253 start_sequence ();
5255 for (i = 0; i < count; i++)
5257 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5258 RTX_UNCHANGING_P (mem) = unchanging_p;
5259 MEM_IN_STRUCT_P (mem) = in_struct_p;
5260 MEM_SCALAR_P (mem) = scalar_p;
5261 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5264 if (write_back)
5265 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5267 seq = get_insns ();
5268 end_sequence ();
5270 return seq;
5273 result = gen_rtx_PARALLEL (VOIDmode,
5274 rtvec_alloc (count + (write_back ? 1 : 0)));
5275 if (write_back)
5277 XVECEXP (result, 0, 0)
5278 = gen_rtx_SET (GET_MODE (to), to,
5279 plus_constant (to, count * 4 * sign));
5280 i = 1;
5281 count++;
5284 for (j = 0; i < count; i++, j++)
5286 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5287 RTX_UNCHANGING_P (mem) = unchanging_p;
5288 MEM_IN_STRUCT_P (mem) = in_struct_p;
5289 MEM_SCALAR_P (mem) = scalar_p;
5291 XVECEXP (result, 0, i)
5292 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5295 return result;
5299 arm_gen_movstrqi (rtx *operands)
5301 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5302 int i;
5303 rtx src, dst;
5304 rtx st_src, st_dst, fin_src, fin_dst;
5305 rtx part_bytes_reg = NULL;
5306 rtx mem;
5307 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5308 int dst_scalar_p, src_scalar_p;
5310 if (GET_CODE (operands[2]) != CONST_INT
5311 || GET_CODE (operands[3]) != CONST_INT
5312 || INTVAL (operands[2]) > 64
5313 || INTVAL (operands[3]) & 3)
5314 return 0;
5316 st_dst = XEXP (operands[0], 0);
5317 st_src = XEXP (operands[1], 0);
5319 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5320 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5321 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5322 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5323 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5324 src_scalar_p = MEM_SCALAR_P (operands[1]);
5326 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5327 fin_src = src = copy_to_mode_reg (SImode, st_src);
5329 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5330 out_words_to_go = INTVAL (operands[2]) / 4;
5331 last_bytes = INTVAL (operands[2]) & 3;
5333 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5334 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5336 for (i = 0; in_words_to_go >= 2; i+=4)
5338 if (in_words_to_go > 4)
5339 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5340 src_unchanging_p,
5341 src_in_struct_p,
5342 src_scalar_p));
5343 else
5344 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5345 FALSE, src_unchanging_p,
5346 src_in_struct_p, src_scalar_p));
5348 if (out_words_to_go)
5350 if (out_words_to_go > 4)
5351 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5352 dst_unchanging_p,
5353 dst_in_struct_p,
5354 dst_scalar_p));
5355 else if (out_words_to_go != 1)
5356 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5357 dst, TRUE,
5358 (last_bytes == 0
5359 ? FALSE : TRUE),
5360 dst_unchanging_p,
5361 dst_in_struct_p,
5362 dst_scalar_p));
5363 else
5365 mem = gen_rtx_MEM (SImode, dst);
5366 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5367 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5368 MEM_SCALAR_P (mem) = dst_scalar_p;
5369 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5370 if (last_bytes != 0)
5371 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5375 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5376 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5379 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5380 if (out_words_to_go)
5382 rtx sreg;
5384 mem = gen_rtx_MEM (SImode, src);
5385 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5386 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5387 MEM_SCALAR_P (mem) = src_scalar_p;
5388 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5389 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5391 mem = gen_rtx_MEM (SImode, dst);
5392 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5393 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5394 MEM_SCALAR_P (mem) = dst_scalar_p;
5395 emit_move_insn (mem, sreg);
5396 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5397 in_words_to_go--;
5399 if (in_words_to_go) /* Sanity check */
5400 abort ();
5403 if (in_words_to_go)
5405 if (in_words_to_go < 0)
5406 abort ();
5408 mem = gen_rtx_MEM (SImode, src);
5409 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5410 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5411 MEM_SCALAR_P (mem) = src_scalar_p;
5412 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5415 if (last_bytes && part_bytes_reg == NULL)
5416 abort ();
5418 if (BYTES_BIG_ENDIAN && last_bytes)
5420 rtx tmp = gen_reg_rtx (SImode);
5422 /* The bytes we want are in the top end of the word. */
5423 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5424 GEN_INT (8 * (4 - last_bytes))));
5425 part_bytes_reg = tmp;
5427 while (last_bytes)
5429 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5430 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5431 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5432 MEM_SCALAR_P (mem) = dst_scalar_p;
5433 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5435 if (--last_bytes)
5437 tmp = gen_reg_rtx (SImode);
5438 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5439 part_bytes_reg = tmp;
5444 else
5446 if (last_bytes > 1)
5448 mem = gen_rtx_MEM (HImode, dst);
5449 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5450 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5451 MEM_SCALAR_P (mem) = dst_scalar_p;
5452 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5453 last_bytes -= 2;
5454 if (last_bytes)
5456 rtx tmp = gen_reg_rtx (SImode);
5458 emit_insn (gen_addsi3 (dst, dst, GEN_INT (2)));
5459 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5460 part_bytes_reg = tmp;
5464 if (last_bytes)
5466 mem = gen_rtx_MEM (QImode, dst);
5467 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5468 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5469 MEM_SCALAR_P (mem) = dst_scalar_p;
5470 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5474 return 1;
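/* For illustration (an assumed 6-byte copy): operands[2] == 6 gives
   in_words_to_go == 2, out_words_to_go == 1 and last_bytes == 2, so
   the loop above loads two words, stores one whole word, and on a
   little-endian target the tail code finishes with a single halfword
   store.  */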
5477 /* Generate a memory reference for a half word, such that it will be loaded
5478 into the top 16 bits of the word. We can assume that the address is
5479 known to be alignable and of the form reg, or plus (reg, const). */
5482 arm_gen_rotated_half_load (rtx memref)
5484 HOST_WIDE_INT offset = 0;
5485 rtx base = XEXP (memref, 0);
5487 if (GET_CODE (base) == PLUS)
5489 offset = INTVAL (XEXP (base, 1));
5490 base = XEXP (base, 0);
5493 /* If we aren't allowed to generate unaligned addresses, then fail. */
5494 if (TARGET_MMU_TRAPS
5495 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5496 return NULL;
5498 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5500 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5501 return base;
5503 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
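/* For illustration: on a little-endian target a halfword at offset 2
   already occupies the top 16 bits of its containing word, so that
   word is returned unrotated; a halfword at offset 0 needs the
   ROTATE by 16 to bring it to the top.  */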
5506 /* Select a dominance comparison mode if possible for a test of the general
5507 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5508 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5509 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5510 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5511 In all cases OP will be either EQ or NE, but we don't need to know which
5512 here. If we are unable to support a dominance comparison we return
5513 CC mode. This will then fail to match for the RTL expressions that
5514 generate this call. */
5515 enum machine_mode
5516 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5518 enum rtx_code cond1, cond2;
5519 int swapped = 0;
5521 /* Currently we will probably get the wrong result if the individual
5522 comparisons are not simple. This also ensures that it is safe to
5523 reverse a comparison if necessary. */
5524 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5525 != CCmode)
5526 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5527 != CCmode))
5528 return CCmode;
5530 /* The if_then_else variant of this tests the second condition if the
5531 first passes, but is true if the first fails. Reverse the first
5532 condition to get a true "inclusive-or" expression. */
5533 if (cond_or == DOM_CC_NX_OR_Y)
5534 cond1 = reverse_condition (cond1);
5536 /* If the comparisons are not equal, and one doesn't dominate the other,
5537 then we can't do this. */
5538 if (cond1 != cond2
5539 && !comparison_dominates_p (cond1, cond2)
5540 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
5541 return CCmode;
5543 if (swapped)
5545 enum rtx_code temp = cond1;
5546 cond1 = cond2;
5547 cond2 = temp;
5550 switch (cond1)
5552 case EQ:
5553 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
5554 return CC_DEQmode;
5556 switch (cond2)
5558 case LE: return CC_DLEmode;
5559 case LEU: return CC_DLEUmode;
5560 case GE: return CC_DGEmode;
5561 case GEU: return CC_DGEUmode;
5562 default: break;
5565 break;
5567 case LT:
5568 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
5569 return CC_DLTmode;
5570 if (cond2 == LE)
5571 return CC_DLEmode;
5572 if (cond2 == NE)
5573 return CC_DNEmode;
5574 break;
5576 case GT:
5577 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
5578 return CC_DGTmode;
5579 if (cond2 == GE)
5580 return CC_DGEmode;
5581 if (cond2 == NE)
5582 return CC_DNEmode;
5583 break;
5585 case LTU:
5586 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
5587 return CC_DLTUmode;
5588 if (cond2 == LEU)
5589 return CC_DLEUmode;
5590 if (cond2 == NE)
5591 return CC_DNEmode;
5592 break;
5594 case GTU:
5595 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
5596 return CC_DGTUmode;
5597 if (cond2 == GEU)
5598 return CC_DGEUmode;
5599 if (cond2 == NE)
5600 return CC_DNEmode;
5601 break;
5603 /* The remaining cases only occur when both comparisons are the
5604 same. */
5605 case NE:
5606 return CC_DNEmode;
5608 case LE:
5609 return CC_DLEmode;
5611 case GE:
5612 return CC_DGEmode;
5614 case LEU:
5615 return CC_DLEUmode;
5617 case GEU:
5618 return CC_DGEUmode;
5620 default:
5621 break;
5624 abort ();
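/* For illustration: for (ior (lt x y) (le x y)) with DOM_CC_X_OR_Y,
   LT dominates LE (LT true implies LE true), so the code above
   returns CC_DLEmode and a single combined comparison suffices.  */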
5627 enum machine_mode
5628 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
5630 /* All floating point compares return CCFP if it is an equality
5631 comparison, and CCFPE otherwise. */
5632 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5634 switch (op)
5636 case EQ:
5637 case NE:
5638 case UNORDERED:
5639 case ORDERED:
5640 case UNLT:
5641 case UNLE:
5642 case UNGT:
5643 case UNGE:
5644 case UNEQ:
5645 case LTGT:
5646 return CCFPmode;
5648 case LT:
5649 case LE:
5650 case GT:
5651 case GE:
5652 if (TARGET_CIRRUS)
5653 return CCFPmode;
5654 return CCFPEmode;
5656 default:
5657 abort ();
5661 /* A compare with a shifted operand. Because of canonicalization, the
5662 comparison will have to be swapped when we emit the assembler. */
5663 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
5664 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5665 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
5666 || GET_CODE (x) == ROTATERT))
5667 return CC_SWPmode;
5669 /* This is a special case that is used by combine to allow a
5670 comparison of a shifted byte load to be split into a zero-extend
5671 followed by a comparison of the shifted integer (only valid for
5672 equalities and unsigned inequalities). */
5673 if (GET_MODE (x) == SImode
5674 && GET_CODE (x) == ASHIFT
5675 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
5676 && GET_CODE (XEXP (x, 0)) == SUBREG
5677 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
5678 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
5679 && (op == EQ || op == NE
5680 || op == GEU || op == GTU || op == LTU || op == LEU)
5681 && GET_CODE (y) == CONST_INT)
5682 return CC_Zmode;
5684 /* A construct for a conditional compare: if the false arm contains
5685 0, then both conditions must be true; otherwise either condition
5686 must be true. Not all conditions are possible, so CCmode is
5687 returned if it can't be done. */
5688 if (GET_CODE (x) == IF_THEN_ELSE
5689 && (XEXP (x, 2) == const0_rtx
5690 || XEXP (x, 2) == const1_rtx)
5691 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5692 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5693 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5694 INTVAL (XEXP (x, 2)));
5696 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
5697 if (GET_CODE (x) == AND
5698 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5699 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5700 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5701 DOM_CC_X_AND_Y);
5703 if (GET_CODE (x) == IOR
5704 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
5705 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
5706 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
5707 DOM_CC_X_OR_Y);
5709 /* An operation (on Thumb) where we want to test for a single bit.
5710 This is done by shifting that bit up into the top bit of a
5711 scratch register; we can then branch on the sign bit. */
5712 if (TARGET_THUMB
5713 && GET_MODE (x) == SImode
5714 && (op == EQ || op == NE)
5715 && (GET_CODE (x) == ZERO_EXTRACT))
5716 return CC_Nmode;
5718 /* For an operation that sets the condition codes as a side-effect, the
5719 V flag is not set correctly, so we can only use comparisons where
5720 this doesn't matter. (For LT and GE we can use "mi" and "pl"
5721 instead.) */
5722 if (GET_MODE (x) == SImode
5723 && y == const0_rtx
5724 && (op == EQ || op == NE || op == LT || op == GE)
5725 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
5726 || GET_CODE (x) == AND || GET_CODE (x) == IOR
5727 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
5728 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
5729 || GET_CODE (x) == LSHIFTRT
5730 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
5731 || GET_CODE (x) == ROTATERT
5732 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
5733 return CC_NOOVmode;
5735 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
5736 return CC_Zmode;
5738 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
5739 && GET_CODE (x) == PLUS
5740 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
5741 return CC_Cmode;
5743 return CCmode;
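/* For illustration: an EQ or NE test of (plus (reg) (reg)) against
   zero selects CC_NOOVmode above, so the comparison folds into the
   arithmetic itself: an "adds" followed by a conditional branch,
   with no separate compare instruction.  */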
5746 /* X and Y are two things to compare using CODE. Emit the compare insn and
5747 return the rtx for register 0 in the proper mode. FP means this is a
5748 floating point compare: I don't think that it is needed on the arm. */
5750 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
5752 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
5753 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
5755 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
5756 gen_rtx_COMPARE (mode, x, y)));
5758 return cc_reg;
5761 /* Generate a sequence of insns that will compute the correct return
5762 address mask depending on the physical architecture that the program
5763 is running on. */
5765 arm_gen_return_addr_mask (void)
5767 rtx reg = gen_reg_rtx (Pmode);
5769 emit_insn (gen_return_addr_mask (reg));
5770 return reg;
5773 void
5774 arm_reload_in_hi (rtx *operands)
5776 rtx ref = operands[1];
5777 rtx base, scratch;
5778 HOST_WIDE_INT offset = 0;
5780 if (GET_CODE (ref) == SUBREG)
5782 offset = SUBREG_BYTE (ref);
5783 ref = SUBREG_REG (ref);
5786 if (GET_CODE (ref) == REG)
5788 /* We have a pseudo which has been spilt onto the stack; there
5789 are two cases here: the first where there is a simple
5790 stack-slot replacement and a second where the stack-slot is
5791 out of range, or is used as a subreg. */
5792 if (reg_equiv_mem[REGNO (ref)])
5794 ref = reg_equiv_mem[REGNO (ref)];
5795 base = find_replacement (&XEXP (ref, 0));
5797 else
5798 /* The slot is out of range, or was dressed up in a SUBREG. */
5799 base = reg_equiv_address[REGNO (ref)];
5801 else
5802 base = find_replacement (&XEXP (ref, 0));
5804 /* Handle the case where the address is too complex to be offset by 1. */
5805 if (GET_CODE (base) == MINUS
5806 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5808 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5810 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5811 base = base_plus;
5813 else if (GET_CODE (base) == PLUS)
5815 /* The addend must be CONST_INT, or we would have dealt with it above. */
5816 HOST_WIDE_INT hi, lo;
5818 offset += INTVAL (XEXP (base, 1));
5819 base = XEXP (base, 0);
5821 /* Rework the address into a legal sequence of insns. */
5822 /* Valid range for lo is -4095 -> 4095 */
5823 lo = (offset >= 0
5824 ? (offset & 0xfff)
5825 : -((-offset) & 0xfff));
5827 /* Corner case: if lo is the max offset, then we would be out of range
5828 once we have added the additional 1 below, so bump the msb into the
5829 pre-loading insn(s). */
5830 if (lo == 4095)
5831 lo &= 0x7ff;
5833 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5834 ^ (HOST_WIDE_INT) 0x80000000)
5835 - (HOST_WIDE_INT) 0x80000000);
5837 if (hi + lo != offset)
5838 abort ();
5840 if (hi != 0)
5842 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5844 /* Get the base address; addsi3 knows how to handle constants
5845 that require more than one insn. */
5846 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
5847 base = base_plus;
5848 offset = lo;
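/* For illustration (assumed offsets): 4100 splits into hi == 4096
   and lo == 4, giving one add of #4096 followed by byte loads at
   #4 and #5; 4095 would collide with the extra +1 below, so it
   splits into hi == 2048 and lo == 2047 instead.  */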
5852 /* Operands[2] may overlap operands[0] (though it won't overlap
5853 operands[1]), that's why we asked for a DImode reg -- so we can
5854 use the bit that does not overlap. */
5855 if (REGNO (operands[2]) == REGNO (operands[0]))
5856 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5857 else
5858 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5860 emit_insn (gen_zero_extendqisi2 (scratch,
5861 gen_rtx_MEM (QImode,
5862 plus_constant (base,
5863 offset))));
5864 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
5865 gen_rtx_MEM (QImode,
5866 plus_constant (base,
5867 offset + 1))));
5868 if (!BYTES_BIG_ENDIAN)
5869 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5870 gen_rtx_IOR (SImode,
5871 gen_rtx_ASHIFT
5872 (SImode,
5873 gen_rtx_SUBREG (SImode, operands[0], 0),
5874 GEN_INT (8)),
5875 scratch)));
5876 else
5877 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
5878 gen_rtx_IOR (SImode,
5879 gen_rtx_ASHIFT (SImode, scratch,
5880 GEN_INT (8)),
5881 gen_rtx_SUBREG (SImode, operands[0],
5882 0))));
5885 /* Handle storing a half-word to memory during reload by synthesizing as two
5886 byte stores. Take care not to clobber the input values until after we
5887 have moved them somewhere safe. This code assumes that if the DImode
5888 scratch in operands[2] overlaps either the input value or output address
5889 in some way, then that value must die in this insn (we absolutely need
5890 two scratch registers for some corner cases). */
5891 void
5892 arm_reload_out_hi (rtx *operands)
5894 rtx ref = operands[0];
5895 rtx outval = operands[1];
5896 rtx base, scratch;
5897 HOST_WIDE_INT offset = 0;
5899 if (GET_CODE (ref) == SUBREG)
5901 offset = SUBREG_BYTE (ref);
5902 ref = SUBREG_REG (ref);
5905 if (GET_CODE (ref) == REG)
5907 /* We have a pseudo which has been spilt onto the stack; there
5908 are two cases here: the first where there is a simple
5909 stack-slot replacement and a second where the stack-slot is
5910 out of range, or is used as a subreg. */
5911 if (reg_equiv_mem[REGNO (ref)])
5913 ref = reg_equiv_mem[REGNO (ref)];
5914 base = find_replacement (&XEXP (ref, 0));
5916 else
5917 /* The slot is out of range, or was dressed up in a SUBREG. */
5918 base = reg_equiv_address[REGNO (ref)];
5920 else
5921 base = find_replacement (&XEXP (ref, 0));
5923 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
5925 /* Handle the case where the address is too complex to be offset by 1. */
5926 if (GET_CODE (base) == MINUS
5927 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
5929 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5931 /* Be careful not to destroy OUTVAL. */
5932 if (reg_overlap_mentioned_p (base_plus, outval))
5934 /* Updating base_plus might destroy outval, see if we can
5935 swap the scratch and base_plus. */
5936 if (!reg_overlap_mentioned_p (scratch, outval))
5938 rtx tmp = scratch;
5939 scratch = base_plus;
5940 base_plus = tmp;
5942 else
5944 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
5946 /* Be conservative and copy OUTVAL into the scratch now;
5947 this should only be necessary if outval is a subreg
5948 of something larger than a word. */
5949 /* XXX Might this clobber base? I can't see how it can,
5950 since scratch is known to overlap with OUTVAL, and
5951 must be wider than a word. */
5952 emit_insn (gen_movhi (scratch_hi, outval));
5953 outval = scratch_hi;
5957 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
5958 base = base_plus;
5960 else if (GET_CODE (base) == PLUS)
5962 /* The addend must be CONST_INT, or we would have dealt with it above. */
5963 HOST_WIDE_INT hi, lo;
5965 offset += INTVAL (XEXP (base, 1));
5966 base = XEXP (base, 0);
5968 /* Rework the address into a legal sequence of insns. */
5969 /* Valid range for lo is -4095 -> 4095 */
5970 lo = (offset >= 0
5971 ? (offset & 0xfff)
5972 : -((-offset) & 0xfff));
5974 /* Corner case: if lo is the max offset, then we would be out of range
5975 once we have added the additional 1 below, so bump the msb into the
5976 pre-loading insn(s). */
5977 if (lo == 4095)
5978 lo &= 0x7ff;
5980 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
5981 ^ (HOST_WIDE_INT) 0x80000000)
5982 - (HOST_WIDE_INT) 0x80000000);
5984 if (hi + lo != offset)
5985 abort ();
5987 if (hi != 0)
5989 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
5991 /* Be careful not to destroy OUTVAL. */
5992 if (reg_overlap_mentioned_p (base_plus, outval))
5994 /* Updating base_plus might destroy outval, see if we
5995 can swap the scratch and base_plus. */
5996 if (!reg_overlap_mentioned_p (scratch, outval))
5998 rtx tmp = scratch;
5999 scratch = base_plus;
6000 base_plus = tmp;
6002 else
6004 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6006 /* Be conservative and copy outval into scratch now;
6007 this should only be necessary if outval is a
6008 subreg of something larger than a word. */
6009 /* XXX Might this clobber base? I can't see how it
6010 can, since scratch is known to overlap with
6011 outval. */
6012 emit_insn (gen_movhi (scratch_hi, outval));
6013 outval = scratch_hi;
6017 /* Get the base address; addsi3 knows how to handle constants
6018 that require more than one insn. */
6019 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6020 base = base_plus;
6021 offset = lo;
6025 if (BYTES_BIG_ENDIAN)
6027 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6028 plus_constant (base, offset + 1)),
6029 gen_lowpart (QImode, outval)));
6030 emit_insn (gen_lshrsi3 (scratch,
6031 gen_rtx_SUBREG (SImode, outval, 0),
6032 GEN_INT (8)));
6033 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6034 gen_lowpart (QImode, scratch)));
6036 else
6038 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6039 gen_lowpart (QImode, outval)));
6040 emit_insn (gen_lshrsi3 (scratch,
6041 gen_rtx_SUBREG (SImode, outval, 0),
6042 GEN_INT (8)));
6043 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6044 plus_constant (base, offset + 1)),
6045 gen_lowpart (QImode, scratch)));
6049 /* Print a symbolic form of X to the debug file, F. */
6050 static void
6051 arm_print_value (FILE *f, rtx x)
6053 switch (GET_CODE (x))
6055 case CONST_INT:
6056 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6057 return;
6059 case CONST_DOUBLE:
6060 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6061 return;
6063 case CONST_VECTOR:
6065 int i;
6067 fprintf (f, "<");
6068 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6070 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6071 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6072 fputc (',', f);
6074 fprintf (f, ">");
6076 return;
6078 case CONST_STRING:
6079 fprintf (f, "\"%s\"", XSTR (x, 0));
6080 return;
6082 case SYMBOL_REF:
6083 fprintf (f, "`%s'", XSTR (x, 0));
6084 return;
6086 case LABEL_REF:
6087 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6088 return;
6090 case CONST:
6091 arm_print_value (f, XEXP (x, 0));
6092 return;
6094 case PLUS:
6095 arm_print_value (f, XEXP (x, 0));
6096 fprintf (f, "+");
6097 arm_print_value (f, XEXP (x, 1));
6098 return;
6100 case PC:
6101 fprintf (f, "pc");
6102 return;
6104 default:
6105 fprintf (f, "????");
6106 return;
6110 /* Routines for manipulation of the constant pool. */
6112 /* Arm instructions cannot load a large constant directly into a
6113 register; they have to come from a pc relative load. The constant
6114 must therefore be placed in the addressable range of the pc
6115 relative load. Depending on the precise pc relative load
6116 instruction the range is somewhere between 256 bytes and 4k. This
6117 means that we often have to dump a constant inside a function, and
6118 generate code to branch around it.
6120 It is important to minimize this, since the branches will slow
6121 things down and make the code larger.
6123 Normally we can hide the table after an existing unconditional
6124 branch so that there is no interruption of the flow, but in the
6125 worst case the code looks like this:
6127 ldr rn, L1
6128 ...
6129 b L2
6130 align
6131 L1: .long value
6132 L2:
6133 ...
6135 ldr rn, L3
6136 ...
6137 b L4
6138 align
6139 L3: .long value
6140 L4:
6141 ...
6143 We fix this by performing a scan after scheduling, which notices
6144 which instructions need to have their operands fetched from the
6145 constant table and builds the table.
6147 The algorithm starts by building a table of all the constants that
6148 need fixing up and all the natural barriers in the function (places
6149 where a constant table can be dropped without breaking the flow).
6150 For each fixup we note how far the pc-relative replacement will be
6151 able to reach and the offset of the instruction into the function.
6153 Having built the table we then group the fixes together to form
6154 tables that are as large as possible (subject to addressing
6155 constraints) and emit each table of constants after the last
6156 barrier that is within range of all the instructions in the group.
6157 If a group does not contain a barrier, then we forcibly create one
6158 by inserting a jump instruction into the flow. Once the table has
6159 been inserted, the insns are then modified to reference the
6160 relevant entry in the pool.
6162 Possible enhancements to the algorithm (not implemented) are:
6164 1) For some processors and object formats, there may be benefit in
6165 aligning the pools to the start of cache lines; this alignment
6166 would need to be taken into account when calculating addressability
6167 of a pool. */
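/* After this pass the two constants in the worst case above would
   typically end up sharing a single pool behind one branch, e.g.
   (purely illustrative):

       ldr rn, L1
       ...
       ldr rm, L3
       ...
       b L4
       align
   L1: .long value
   L3: .long value
   L4:  */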
6169 /* These typedefs are located at the start of this file, so that
6170 they can be used in the prototypes there. This comment is to
6171 remind readers of that fact so that the following structures
6172 can be understood more easily.
6174 typedef struct minipool_node Mnode;
6175 typedef struct minipool_fixup Mfix; */
6177 struct minipool_node
6179 /* Doubly linked chain of entries. */
6180 Mnode * next;
6181 Mnode * prev;
6182 /* The maximum offset into the code at which this entry can be placed. While
6183 pushing fixes for forward references, all entries are sorted in order
6184 of increasing max_address. */
6185 HOST_WIDE_INT max_address;
6186 /* Similarly for an entry inserted for a backwards ref. */
6187 HOST_WIDE_INT min_address;
6188 /* The number of fixes referencing this entry. This can become zero
6189 if we "unpush" an entry. In this case we ignore the entry when we
6190 come to emit the code. */
6191 int refcount;
6192 /* The offset from the start of the minipool. */
6193 HOST_WIDE_INT offset;
6194 /* The value in the table. */
6195 rtx value;
6196 /* The mode of value. */
6197 enum machine_mode mode;
6198 /* The size of the value. With iWMMXt enabled,
6199 sizes > 4 also imply an alignment of 8 bytes. */
6200 int fix_size;
6203 struct minipool_fixup
6205 Mfix * next;
6206 rtx insn;
6207 HOST_WIDE_INT address;
6208 rtx * loc;
6209 enum machine_mode mode;
6210 int fix_size;
6211 rtx value;
6212 Mnode * minipool;
6213 HOST_WIDE_INT forwards;
6214 HOST_WIDE_INT backwards;
6217 /* Fixes less than a word need padding out to a word boundary. */
6218 #define MINIPOOL_FIX_SIZE(mode) \
6219 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
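/* So, for example, a QImode or HImode fix still occupies 4 bytes in
   the pool, while a DImode or DFmode fix occupies 8.  */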
6221 static Mnode * minipool_vector_head;
6222 static Mnode * minipool_vector_tail;
6223 static rtx minipool_vector_label;
6225 /* The linked list of all minipool fixes required for this function. */
6226 Mfix * minipool_fix_head;
6227 Mfix * minipool_fix_tail;
6228 /* The fix entry for the current minipool, once it has been placed. */
6229 Mfix * minipool_barrier;
6231 /* Determines if INSN is the start of a jump table. Returns the end
6232 of the TABLE or NULL_RTX. */
6233 static rtx
6234 is_jump_table (rtx insn)
6236 rtx table;
6238 if (GET_CODE (insn) == JUMP_INSN
6239 && JUMP_LABEL (insn) != NULL
6240 && ((table = next_real_insn (JUMP_LABEL (insn)))
6241 == next_real_insn (insn))
6242 && table != NULL
6243 && GET_CODE (table) == JUMP_INSN
6244 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6245 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6246 return table;
6248 return NULL_RTX;
6251 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6252 #define JUMP_TABLES_IN_TEXT_SECTION 0
6253 #endif
6255 static HOST_WIDE_INT
6256 get_jump_table_size (rtx insn)
6258 /* ADDR_VECs only take room if read-only data goes into the text
6259 section. */
6260 if (JUMP_TABLES_IN_TEXT_SECTION
6261 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6262 || 1
6263 #endif
6266 rtx body = PATTERN (insn);
6267 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6269 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6272 return 0;
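/* For example (illustrative only), an ADDR_DIFF_VEC in SImode with
   ten entries accounts for 4 * 10 = 40 bytes, but only when jump
   tables are placed in the text section.  */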
6275 /* Move a minipool fix MP from its current location to before MAX_MP.
6276 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6277 constraints may need updating. */
6278 static Mnode *
6279 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6280 HOST_WIDE_INT max_address)
6282 /* This should never be true and the code below assumes these are
6283 different. */
6284 if (mp == max_mp)
6285 abort ();
6287 if (max_mp == NULL)
6289 if (max_address < mp->max_address)
6290 mp->max_address = max_address;
6292 else
6294 if (max_address > max_mp->max_address - mp->fix_size)
6295 mp->max_address = max_mp->max_address - mp->fix_size;
6296 else
6297 mp->max_address = max_address;
6299 /* Unlink MP from its current position. Since max_mp is non-null,
6300 mp->prev must be non-null. */
6301 mp->prev->next = mp->next;
6302 if (mp->next != NULL)
6303 mp->next->prev = mp->prev;
6304 else
6305 minipool_vector_tail = mp->prev;
6307 /* Re-insert it before MAX_MP. */
6308 mp->next = max_mp;
6309 mp->prev = max_mp->prev;
6310 max_mp->prev = mp;
6312 if (mp->prev != NULL)
6313 mp->prev->next = mp;
6314 else
6315 minipool_vector_head = mp;
6318 /* Save the new entry. */
6319 max_mp = mp;
6321 /* Scan over the preceding entries and adjust their addresses as
6322 required. */
6323 while (mp->prev != NULL
6324 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6326 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6327 mp = mp->prev;
6330 return max_mp;
6333 /* Add a constant to the minipool for a forward reference. Returns the
6334 node added or NULL if the constant will not fit in this pool. */
6335 static Mnode *
6336 add_minipool_forward_ref (Mfix *fix)
6338 /* If set, max_mp is the first pool_entry that has a lower
6339 constraint than the one we are trying to add. */
6340 Mnode * max_mp = NULL;
6341 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6342 Mnode * mp;
6344 /* If this fix's address is greater than the address of the first
6345 entry, then we can't put the fix in this pool. We subtract the
6346 size of the current fix to ensure that if the table is fully
6347 packed we still have enough room to insert this value by shuffling
6348 the other fixes forwards. */
6349 if (minipool_vector_head &&
6350 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6351 return NULL;
6353 /* Scan the pool to see if a constant with the same value has
6354 already been added. While we are doing this, also note the
6355 location where we must insert the constant if it doesn't already
6356 exist. */
6357 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6359 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6360 && fix->mode == mp->mode
6361 && (GET_CODE (fix->value) != CODE_LABEL
6362 || (CODE_LABEL_NUMBER (fix->value)
6363 == CODE_LABEL_NUMBER (mp->value)))
6364 && rtx_equal_p (fix->value, mp->value))
6366 /* More than one fix references this entry. */
6367 mp->refcount++;
6368 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6371 /* Note the insertion point if necessary. */
6372 if (max_mp == NULL
6373 && mp->max_address > max_address)
6374 max_mp = mp;
6376 /* If we are inserting an 8-byte aligned quantity and
6377 we have not already found an insertion point, then
6378 make sure that all such 8-byte aligned quantities are
6379 placed at the start of the pool. */
6380 if (TARGET_REALLY_IWMMXT
6381 && max_mp == NULL
6382 && fix->fix_size == 8
6383 && mp->fix_size != 8)
6385 max_mp = mp;
6386 max_address = mp->max_address;
6390 /* The value is not currently in the minipool, so we need to create
6391 a new entry for it. If MAX_MP is NULL, the entry will be put on
6392 the end of the list since the placement is less constrained than
6393 any existing entry. Otherwise, we insert the new fix before
6394 MAX_MP and, if necessary, adjust the constraints on the other
6395 entries. */
6396 mp = xmalloc (sizeof (* mp));
6397 mp->fix_size = fix->fix_size;
6398 mp->mode = fix->mode;
6399 mp->value = fix->value;
6400 mp->refcount = 1;
6401 /* Not yet required for a backwards ref. */
6402 mp->min_address = -65536;
6404 if (max_mp == NULL)
6406 mp->max_address = max_address;
6407 mp->next = NULL;
6408 mp->prev = minipool_vector_tail;
6410 if (mp->prev == NULL)
6412 minipool_vector_head = mp;
6413 minipool_vector_label = gen_label_rtx ();
6415 else
6416 mp->prev->next = mp;
6418 minipool_vector_tail = mp;
6420 else
6422 if (max_address > max_mp->max_address - mp->fix_size)
6423 mp->max_address = max_mp->max_address - mp->fix_size;
6424 else
6425 mp->max_address = max_address;
6427 mp->next = max_mp;
6428 mp->prev = max_mp->prev;
6429 max_mp->prev = mp;
6430 if (mp->prev != NULL)
6431 mp->prev->next = mp;
6432 else
6433 minipool_vector_head = mp;
6436 /* Save the new entry. */
6437 max_mp = mp;
6439 /* Scan over the preceding entries and adjust their addresses as
6440 required. */
6441 while (mp->prev != NULL
6442 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6444 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6445 mp = mp->prev;
6448 return max_mp;
6451 static Mnode *
6452 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6453 HOST_WIDE_INT min_address)
6455 HOST_WIDE_INT offset;
6457 /* This should never be true, and the code below assumes these are
6458 different. */
6459 if (mp == min_mp)
6460 abort ();
6462 if (min_mp == NULL)
6464 if (min_address > mp->min_address)
6465 mp->min_address = min_address;
6467 else
6469 /* We will adjust this below if it is too loose. */
6470 mp->min_address = min_address;
6472 /* Unlink MP from its current position. Since min_mp is non-null,
6473 mp->next must be non-null. */
6474 mp->next->prev = mp->prev;
6475 if (mp->prev != NULL)
6476 mp->prev->next = mp->next;
6477 else
6478 minipool_vector_head = mp->next;
6480 /* Reinsert it after MIN_MP. */
6481 mp->prev = min_mp;
6482 mp->next = min_mp->next;
6483 min_mp->next = mp;
6484 if (mp->next != NULL)
6485 mp->next->prev = mp;
6486 else
6487 minipool_vector_tail = mp;
6490 min_mp = mp;
6492 offset = 0;
6493 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6495 mp->offset = offset;
6496 if (mp->refcount > 0)
6497 offset += mp->fix_size;
6499 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6500 mp->next->min_address = mp->min_address + mp->fix_size;
6503 return min_mp;
6506 /* Add a constant to the minipool for a backward reference. Returns the
6507 node added or NULL if the constant will not fit in this pool.
6509 Note that the code for insertion for a backwards reference can be
6510 somewhat confusing because the calculated offsets for each fix do
6511 not take into account the size of the pool (which is still under
6512 construction). */
6513 static Mnode *
6514 add_minipool_backward_ref (Mfix *fix)
6516 /* If set, min_mp is the last pool_entry that has a lower constraint
6517 than the one we are trying to add. */
6518 Mnode *min_mp = NULL;
6519 /* This can be negative, since it is only a constraint. */
6520 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6521 Mnode *mp;
6523 /* If we can't reach the current pool from this insn, or if we can't
6524 insert this entry at the end of the pool without pushing other
6525 fixes out of range, then we don't try. This ensures that we
6526 can't fail later on. */
6527 if (min_address >= minipool_barrier->address
6528 || (minipool_vector_tail->min_address + fix->fix_size
6529 >= minipool_barrier->address))
6530 return NULL;
6532 /* Scan the pool to see if a constant with the same value has
6533 already been added. While we are doing this, also note the
6534 location where we must insert the constant if it doesn't already
6535 exist. */
6536 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
6538 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6539 && fix->mode == mp->mode
6540 && (GET_CODE (fix->value) != CODE_LABEL
6541 || (CODE_LABEL_NUMBER (fix->value)
6542 == CODE_LABEL_NUMBER (mp->value)))
6543 && rtx_equal_p (fix->value, mp->value)
6544 /* Check that there is enough slack to move this entry to the
6545 end of the table (this is conservative). */
6546 && (mp->max_address
6547 > (minipool_barrier->address
6548 + minipool_vector_tail->offset
6549 + minipool_vector_tail->fix_size)))
6551 mp->refcount++;
6552 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
6555 if (min_mp != NULL)
6556 mp->min_address += fix->fix_size;
6557 else
6559 /* Note the insertion point if necessary. */
6560 if (mp->min_address < min_address)
6562 /* For now, we do not allow the insertion of nodes requiring
6563 8-byte alignment anywhere but at the start of the pool. */
6564 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
6565 return NULL;
6566 else
6567 min_mp = mp;
6569 else if (mp->max_address
6570 < minipool_barrier->address + mp->offset + fix->fix_size)
6572 /* Inserting before this entry would push the fix beyond
6573 its maximum address (which can happen if we have
6574 re-located a forwards fix); force the new fix to come
6575 after it. */
6576 min_mp = mp;
6577 min_address = mp->min_address + fix->fix_size;
6579 /* If we are inserting an 8-byte aligned quantity and
6580 we have not already found an insertion point, then
6581 make sure that all such 8-byte aligned quantities are
6582 placed at the start of the pool. */
6583 else if (TARGET_REALLY_IWMMXT
6584 && min_mp == NULL
6585 && fix->fix_size == 8
6586 && mp->fix_size < 8)
6588 min_mp = mp;
6589 min_address = mp->min_address + fix->fix_size;
6594 /* We need to create a new entry. */
6595 mp = xmalloc (sizeof (* mp));
6596 mp->fix_size = fix->fix_size;
6597 mp->mode = fix->mode;
6598 mp->value = fix->value;
6599 mp->refcount = 1;
6600 mp->max_address = minipool_barrier->address + 65536;
6602 mp->min_address = min_address;
6604 if (min_mp == NULL)
6606 mp->prev = NULL;
6607 mp->next = minipool_vector_head;
6609 if (mp->next == NULL)
6611 minipool_vector_tail = mp;
6612 minipool_vector_label = gen_label_rtx ();
6614 else
6615 mp->next->prev = mp;
6617 minipool_vector_head = mp;
6619 else
6621 mp->next = min_mp->next;
6622 mp->prev = min_mp;
6623 min_mp->next = mp;
6625 if (mp->next != NULL)
6626 mp->next->prev = mp;
6627 else
6628 minipool_vector_tail = mp;
6631 /* Save the new entry. */
6632 min_mp = mp;
6634 if (mp->prev)
6635 mp = mp->prev;
6636 else
6637 mp->offset = 0;
6639 /* Scan over the following entries and adjust their offsets. */
6640 while (mp->next != NULL)
6642 if (mp->next->min_address < mp->min_address + mp->fix_size)
6643 mp->next->min_address = mp->min_address + mp->fix_size;
6645 if (mp->refcount)
6646 mp->next->offset = mp->offset + mp->fix_size;
6647 else
6648 mp->next->offset = mp->offset;
6650 mp = mp->next;
6653 return min_mp;
6656 static void
6657 assign_minipool_offsets (Mfix *barrier)
6659 HOST_WIDE_INT offset = 0;
6660 Mnode *mp;
6662 minipool_barrier = barrier;
6664 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6666 mp->offset = offset;
6668 if (mp->refcount > 0)
6669 offset += mp->fix_size;
6673 /* Output the literal table. */
6674 static void
6675 dump_minipool (rtx scan)
6677 Mnode * mp;
6678 Mnode * nmp;
6679 int align64 = 0;
6681 if (TARGET_REALLY_IWMMXT)
6682 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6683 if (mp->refcount > 0 && mp->fix_size == 8)
6685 align64 = 1;
6686 break;
6689 if (rtl_dump_file)
6690 fprintf (rtl_dump_file,
6691 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
6692 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
6694 scan = emit_label_after (gen_label_rtx (), scan);
6695 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
6696 scan = emit_label_after (minipool_vector_label, scan);
6698 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
6700 if (mp->refcount > 0)
6702 if (rtl_dump_file)
6704 fprintf (rtl_dump_file,
6705 ";; Offset %u, min %ld, max %ld ",
6706 (unsigned) mp->offset, (unsigned long) mp->min_address,
6707 (unsigned long) mp->max_address);
6708 arm_print_value (rtl_dump_file, mp->value);
6709 fputc ('\n', rtl_dump_file);
6712 switch (mp->fix_size)
6714 #ifdef HAVE_consttable_1
6715 case 1:
6716 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
6717 break;
6719 #endif
6720 #ifdef HAVE_consttable_2
6721 case 2:
6722 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
6723 break;
6725 #endif
6726 #ifdef HAVE_consttable_4
6727 case 4:
6728 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
6729 break;
6731 #endif
6732 #ifdef HAVE_consttable_8
6733 case 8:
6734 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
6735 break;
6737 #endif
6738 default:
6739 abort ();
6740 break;
6744 nmp = mp->next;
6745 free (mp);
6748 minipool_vector_head = minipool_vector_tail = NULL;
6749 scan = emit_insn_after (gen_consttable_end (), scan);
6750 scan = emit_barrier_after (scan);
6753 /* Return the cost of forcibly inserting a barrier after INSN. */
6754 static int
6755 arm_barrier_cost (rtx insn)
6757 /* Basing the location of the pool on the loop depth is preferable,
6758 but at the moment, the basic block information seems to be
6759 corrupt by this stage of the compilation. */
6760 int base_cost = 50;
6761 rtx next = next_nonnote_insn (insn);
6763 if (next != NULL && GET_CODE (next) == CODE_LABEL)
6764 base_cost -= 20;
6766 switch (GET_CODE (insn))
6768 case CODE_LABEL:
6769 /* It will always be better to place the table before the label, rather
6770 than after it. */
6771 return 50;
6773 case INSN:
6774 case CALL_INSN:
6775 return base_cost;
6777 case JUMP_INSN:
6778 return base_cost - 10;
6780 default:
6781 return base_cost + 10;
6785 /* Find the best place in the insn stream in the range
6786 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
6787 Create the barrier by inserting a jump and add a new fix entry for
6788 it. */
6789 static Mfix *
6790 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
6792 HOST_WIDE_INT count = 0;
6793 rtx barrier;
6794 rtx from = fix->insn;
6795 rtx selected = from;
6796 int selected_cost;
6797 HOST_WIDE_INT selected_address;
6798 Mfix * new_fix;
6799 HOST_WIDE_INT max_count = max_address - fix->address;
6800 rtx label = gen_label_rtx ();
6802 selected_cost = arm_barrier_cost (from);
6803 selected_address = fix->address;
6805 while (from && count < max_count)
6807 rtx tmp;
6808 int new_cost;
6810 /* This code shouldn't have been called if there was a natural barrier
6811 within range. */
6812 if (GET_CODE (from) == BARRIER)
6813 abort ();
6815 /* Count the length of this insn. */
6816 count += get_attr_length (from);
6818 /* If there is a jump table, add its length. */
6819 tmp = is_jump_table (from);
6820 if (tmp != NULL)
6822 count += get_jump_table_size (tmp);
6824 /* Jump tables aren't in a basic block, so base the cost on
6825 the dispatch insn. If we select this location, we will
6826 still put the pool after the table. */
6827 new_cost = arm_barrier_cost (from);
6829 if (count < max_count && new_cost <= selected_cost)
6831 selected = tmp;
6832 selected_cost = new_cost;
6833 selected_address = fix->address + count;
6836 /* Continue after the dispatch table. */
6837 from = NEXT_INSN (tmp);
6838 continue;
6841 new_cost = arm_barrier_cost (from);
6843 if (count < max_count && new_cost <= selected_cost)
6845 selected = from;
6846 selected_cost = new_cost;
6847 selected_address = fix->address + count;
6850 from = NEXT_INSN (from);
6853 /* Create a new JUMP_INSN that branches around a barrier. */
6854 from = emit_jump_insn_after (gen_jump (label), selected);
6855 JUMP_LABEL (from) = label;
6856 barrier = emit_barrier_after (from);
6857 emit_label_after (label, barrier);
6859 /* Create a minipool barrier entry for the new barrier. */
6860 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
6861 new_fix->insn = barrier;
6862 new_fix->address = selected_address;
6863 new_fix->next = fix->next;
6864 fix->next = new_fix;
6866 return new_fix;
6869 /* Record that there is a natural barrier in the insn stream at
6870 ADDRESS. */
6871 static void
6872 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
6874 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6876 fix->insn = insn;
6877 fix->address = address;
6879 fix->next = NULL;
6880 if (minipool_fix_head != NULL)
6881 minipool_fix_tail->next = fix;
6882 else
6883 minipool_fix_head = fix;
6885 minipool_fix_tail = fix;
6888 /* Record INSN, which will need fixing up to load a value from the
6889 minipool. ADDRESS is the offset of the insn from the start of the
6890 function; LOC is a pointer to the part of the insn which requires
6891 fixing; VALUE is the constant that must be loaded, which is of type
6892 MODE. */
6893 static void
6894 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
6895 enum machine_mode mode, rtx value)
6897 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
6899 #ifdef AOF_ASSEMBLER
6900 /* PIC symbol references need to be converted into offsets into the
6901 based area. */
6902 /* XXX This shouldn't be done here. */
6903 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
6904 value = aof_pic_entry (value);
6905 #endif /* AOF_ASSEMBLER */
6907 fix->insn = insn;
6908 fix->address = address;
6909 fix->loc = loc;
6910 fix->mode = mode;
6911 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
6912 fix->value = value;
6913 fix->forwards = get_attr_pool_range (insn);
6914 fix->backwards = get_attr_neg_pool_range (insn);
6915 fix->minipool = NULL;
6917 /* If an insn doesn't have a range defined for it, then it isn't
6918 expecting to be reworked by this code. Better to abort now than
6919 to generate duff assembly code. */
6920 if (fix->forwards == 0 && fix->backwards == 0)
6921 abort ();
6923 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
6924 So there might be an empty word before the start of the pool.
6925 Hence we reduce the forward range by 4 to allow for this
6926 possibility. */
6927 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
6928 fix->forwards -= 4;
6930 if (rtl_dump_file)
6932 fprintf (rtl_dump_file,
6933 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
6934 GET_MODE_NAME (mode),
6935 INSN_UID (insn), (unsigned long) address,
6936 -1 * (long)fix->backwards, (long)fix->forwards);
6937 arm_print_value (rtl_dump_file, fix->value);
6938 fprintf (rtl_dump_file, "\n");
6941 /* Add it to the chain of fixes. */
6942 fix->next = NULL;
6944 if (minipool_fix_head != NULL)
6945 minipool_fix_tail->next = fix;
6946 else
6947 minipool_fix_head = fix;
6949 minipool_fix_tail = fix;
6952 /* Scan INSN and note any of its operands that need fixing.
6953 If DO_PUSHES is false we do not actually push any of the fixups
6954 needed. The function returns TRUE if any fixups were needed/pushed.
6955 This is used by arm_memory_load_p() which needs to know about loads
6956 of constants that will be converted into minipool loads. */
6957 static bool
6958 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
6960 bool result = false;
6961 int opno;
6963 extract_insn (insn);
6965 if (!constrain_operands (1))
6966 fatal_insn_not_found (insn);
6968 if (recog_data.n_alternatives == 0)
6969 return false;
6971 /* Fill in recog_op_alt with information about the constraints of this insn. */
6972 preprocess_constraints ();
6974 for (opno = 0; opno < recog_data.n_operands; opno++)
6976 /* Things we need to fix can only occur in inputs. */
6977 if (recog_data.operand_type[opno] != OP_IN)
6978 continue;
6980 /* If this alternative is a memory reference, then any mention
6981 of constants in this alternative is really to fool reload
6982 into allowing us to accept one there. We need to fix them up
6983 now so that we output the right code. */
6984 if (recog_op_alt[opno][which_alternative].memory_ok)
6986 rtx op = recog_data.operand[opno];
6988 if (CONSTANT_P (op))
6990 if (do_pushes)
6991 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
6992 recog_data.operand_mode[opno], op);
6993 result = true;
6995 else if (GET_CODE (op) == MEM
6996 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
6997 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
6999 if (do_pushes)
7001 rtx cop = avoid_constant_pool_reference (op);
7003 /* Casting the address of something to a mode narrower
7004 than a word can cause avoid_constant_pool_reference()
7005 to return the pool reference itself. That's no good to
7006 us here. Let's just hope that we can use the
7007 constant pool value directly. */
7008 if (op == cop)
7009 cop = get_pool_constant (XEXP (op, 0));
7011 push_minipool_fix (insn, address,
7012 recog_data.operand_loc[opno],
7013 recog_data.operand_mode[opno], cop);
7016 result = true;
7021 return result;
7024 /* GCC puts the pool in the wrong place for ARM, since we can only
7025 load addresses a limited distance around the pc. We do some
7026 special munging to move the constant pool values to the correct
7027 point in the code. */
7028 static void
7029 arm_reorg (void)
7031 rtx insn;
7032 HOST_WIDE_INT address = 0;
7033 Mfix * fix;
7035 minipool_fix_head = minipool_fix_tail = NULL;
7037 /* The first insn must always be a note, or the code below won't
7038 scan it properly. */
7039 insn = get_insns ();
7040 if (GET_CODE (insn) != NOTE)
7041 abort ();
7043 /* Scan all the insns and record the operands that will need fixing. */
7044 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7046 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7047 && (arm_cirrus_insn_p (insn)
7048 || GET_CODE (insn) == JUMP_INSN
7049 || arm_memory_load_p (insn)))
7050 cirrus_reorg (insn);
7052 if (GET_CODE (insn) == BARRIER)
7053 push_minipool_barrier (insn, address);
7054 else if (INSN_P (insn))
7056 rtx table;
7058 note_invalid_constants (insn, address, true);
7059 address += get_attr_length (insn);
7061 /* If the insn is a vector jump, add the size of the table
7062 and skip the table. */
7063 if ((table = is_jump_table (insn)) != NULL)
7065 address += get_jump_table_size (table);
7066 insn = table;
7071 fix = minipool_fix_head;
7073 /* Now scan the fixups and perform the required changes. */
7074 while (fix)
7076 Mfix * ftmp;
7077 Mfix * fdel;
7078 Mfix * last_added_fix;
7079 Mfix * last_barrier = NULL;
7080 Mfix * this_fix;
7082 /* Skip any further barriers before the next fix. */
7083 while (fix && GET_CODE (fix->insn) == BARRIER)
7084 fix = fix->next;
7086 /* No more fixes. */
7087 if (fix == NULL)
7088 break;
7090 last_added_fix = NULL;
7092 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7094 if (GET_CODE (ftmp->insn) == BARRIER)
7096 if (ftmp->address >= minipool_vector_head->max_address)
7097 break;
7099 last_barrier = ftmp;
7101 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7102 break;
7104 last_added_fix = ftmp; /* Keep track of the last fix added. */
7107 /* If we found a barrier, drop back to that; any fixes that we
7108 could have reached but come after the barrier will now go in
7109 the next mini-pool. */
7110 if (last_barrier != NULL)
7112 /* Reduce the refcount for those fixes that won't go into this
7113 pool after all. */
7114 for (fdel = last_barrier->next;
7115 fdel && fdel != ftmp;
7116 fdel = fdel->next)
7118 fdel->minipool->refcount--;
7119 fdel->minipool = NULL;
7122 ftmp = last_barrier;
7124 else
7126 /* ftmp is the first fix that we can't fit into this pool and
7127 there are no natural barriers that we could use. Insert a
7128 new barrier in the code somewhere between the previous
7129 fix and this one, and arrange to jump around it. */
7130 HOST_WIDE_INT max_address;
7132 /* The last item on the list of fixes must be a barrier, so
7133 we can never run off the end of the list of fixes without
7134 last_barrier being set. */
7135 if (ftmp == NULL)
7136 abort ();
7138 max_address = minipool_vector_head->max_address;
7139 /* Check that there isn't another fix that is in range that
7140 we couldn't fit into this pool because the pool was
7141 already too large: we need to put the pool before such an
7142 instruction. */
7143 if (ftmp->address < max_address)
7144 max_address = ftmp->address;
7146 last_barrier = create_fix_barrier (last_added_fix, max_address);
7149 assign_minipool_offsets (last_barrier);
7151 while (ftmp)
7153 if (GET_CODE (ftmp->insn) != BARRIER
7154 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7155 == NULL))
7156 break;
7158 ftmp = ftmp->next;
7161 /* Scan over the fixes we have identified for this pool, fixing them
7162 up and adding the constants to the pool itself. */
7163 for (this_fix = fix; this_fix && ftmp != this_fix;
7164 this_fix = this_fix->next)
7165 if (GET_CODE (this_fix->insn) != BARRIER)
7167 rtx addr
7168 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7169 minipool_vector_label),
7170 this_fix->minipool->offset);
7171 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7174 dump_minipool (last_barrier->insn);
7175 fix = ftmp;
7178 /* From now on we must synthesize any constants that we can't handle
7179 directly. This can happen if the RTL gets split during final
7180 instruction generation. */
7181 after_arm_reorg = 1;
7183 /* Free the minipool memory. */
7184 obstack_free (&minipool_obstack, minipool_startobj);
7187 /* Routines to output assembly language. */
7189 /* If the rtx is the correct value then return the string of the number.
7190 In this way we can ensure that valid double constants are generated even
7191 when cross compiling. */
7192 const char *
7193 fp_immediate_constant (rtx x)
7195 REAL_VALUE_TYPE r;
7196 int i;
7198 if (!fpa_consts_inited)
7199 init_fpa_table ();
7201 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7202 for (i = 0; i < 8; i++)
7203 if (REAL_VALUES_EQUAL (r, values_fpa[i]))
7204 return strings_fpa[i];
7206 abort ();
7209 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7210 static const char *
7211 fp_const_from_val (REAL_VALUE_TYPE *r)
7213 int i;
7215 if (!fpa_consts_inited)
7216 init_fpa_table ();
7218 for (i = 0; i < 8; i++)
7219 if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
7220 return strings_fpa[i];
7222 abort ();
7225 /* Output the operands of a LDM/STM instruction to STREAM.
7226 MASK is the ARM register set mask of which only bits 0-15 are important.
7227 REG is the base register, either the frame pointer or the stack pointer.
7228 INSTR is the possibly suffixed load or store instruction. */
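/* For instance (a hypothetical example), with INSTR "ldm%?fd\t%r!",
   REG = SP_REGNUM and a MASK covering r4 and lr, this would print
   something like:

       ldmfd sp!, {r4, lr}  */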
7229 static void
7230 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7232 int i;
7233 int not_first = FALSE;
7235 fputc ('\t', stream);
7236 asm_fprintf (stream, instr, reg);
7237 fputs (", {", stream);
7239 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7240 if (mask & (1 << i))
7242 if (not_first)
7243 fprintf (stream, ", ");
7245 asm_fprintf (stream, "%r", i);
7246 not_first = TRUE;
7249 fprintf (stream, "}");
7251 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7252 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7253 Strictly speaking the instruction would be unpredictable only if
7254 we were writing back the base register as well, but since we never
7255 want to generate an LDM type 2 instruction (register bank switching)
7256 which is what you get if the PC is not being loaded, we do not need
7257 to check for writeback. */
7258 if (! TARGET_APCS_32
7259 && ((mask & (1 << PC_REGNUM)) != 0))
7260 fprintf (stream, "^");
7262 fprintf (stream, "\n");
7265 /* Output a 'call' insn. */
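/* Note that in ARM state the PC reads as the address of the current
   instruction plus 8, so the "mov lr, pc" emitted below leaves LR
   pointing at the instruction after the following jump -- exactly the
   return address we need.  */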
7266 const char *
7267 output_call (rtx *operands)
7269 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7271 if (REGNO (operands[0]) == LR_REGNUM)
7273 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7274 output_asm_insn ("mov%?\t%0, %|lr", operands);
7277 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7279 if (TARGET_INTERWORK)
7280 output_asm_insn ("bx%?\t%0", operands);
7281 else
7282 output_asm_insn ("mov%?\t%|pc, %0", operands);
7284 return "";
7287 /* Output a 'call' insn that is a reference in memory. */
7288 const char *
7289 output_call_mem (rtx *operands)
7291 if (TARGET_INTERWORK)
7293 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7294 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7295 output_asm_insn ("bx%?\t%|ip", operands);
7297 else if (regno_use_in (LR_REGNUM, operands[0]))
7299 /* LR is used in the memory address. We load the address in the
7300 first instruction. It's safe to use IP as the target of the
7301 load since the call will kill it anyway. */
7302 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7303 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7304 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7306 else
7308 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7309 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7312 return "";
7316 /* Output a move from arm registers to an fpa register.
7317 OPERANDS[0] is an fpa register.
7318 OPERANDS[1] is the first register of an arm register pair. */
7319 const char *
7320 output_mov_long_double_fpa_from_arm (rtx *operands)
7322 int arm_reg0 = REGNO (operands[1]);
7323 rtx ops[3];
7325 if (arm_reg0 == IP_REGNUM)
7326 abort ();
7328 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7329 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7330 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7332 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7333 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7335 return "";
7338 /* Output a move from an fpa register to arm registers.
7339 OPERANDS[0] is the first register of an arm register pair.
7340 OPERANDS[1] is an fpa register. */
7341 const char *
7342 output_mov_long_double_arm_from_fpa (rtx *operands)
7344 int arm_reg0 = REGNO (operands[0]);
7345 rtx ops[3];
7347 if (arm_reg0 == IP_REGNUM)
7348 abort ();
7350 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7351 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7352 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7354 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7355 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7356 return "";
7359 /* Output a move from arm registers to arm registers of a long double.
7360 OPERANDS[0] is the destination.
7361 OPERANDS[1] is the source. */
7362 const char *
7363 output_mov_long_double_arm_from_arm (rtx *operands)
7365 /* We have to be careful here because the two might overlap. */
7366 int dest_start = REGNO (operands[0]);
7367 int src_start = REGNO (operands[1]);
7368 rtx ops[2];
7369 int i;
7371 if (dest_start < src_start)
7373 for (i = 0; i < 3; i++)
7375 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7376 ops[1] = gen_rtx_REG (SImode, src_start + i);
7377 output_asm_insn ("mov%?\t%0, %1", ops);
7380 else
7382 for (i = 2; i >= 0; i--)
7384 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7385 ops[1] = gen_rtx_REG (SImode, src_start + i);
7386 output_asm_insn ("mov%?\t%0, %1", ops);
7390 return "";
7394 /* Output a move from arm registers to an fpa register.
7395 OPERANDS[0] is an fpa register.
7396 OPERANDS[1] is the first register of an arm register pair. */
7397 const char *
7398 output_mov_double_fpa_from_arm (rtx *operands)
7400 int arm_reg0 = REGNO (operands[1]);
7401 rtx ops[2];
7403 if (arm_reg0 == IP_REGNUM)
7404 abort ();
7406 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7407 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7408 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7409 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7410 return "";
7413 /* Output a move from an fpa register to arm registers.
7414 OPERANDS[0] is the first register of an arm register pair.
7415 OPERANDS[1] is an fpa register. */
7416 const char *
7417 output_mov_double_arm_from_fpa (rtx *operands)
7419 int arm_reg0 = REGNO (operands[0]);
7420 rtx ops[2];
7422 if (arm_reg0 == IP_REGNUM)
7423 abort ();
7425 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7426 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7427 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
7428 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
7429 return "";
7432 /* Output a move between double words.
7433 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
7434 or MEM<-REG and all MEMs must be offsettable addresses. */
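/* For instance (illustrative only), a DImode load from a simple
   register address is emitted as a single "ldmia rB, {r0, r1}", while
   a REG<-CONST_INT move is split into two immediate moves via
   output_mov_immediate below.  */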
7435 const char *
7436 output_move_double (rtx *operands)
7438 enum rtx_code code0 = GET_CODE (operands[0]);
7439 enum rtx_code code1 = GET_CODE (operands[1]);
7440 rtx otherops[3];
7442 if (code0 == REG)
7444 int reg0 = REGNO (operands[0]);
7446 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
7448 if (code1 == REG)
7450 int reg1 = REGNO (operands[1]);
7451 if (reg1 == IP_REGNUM)
7452 abort ();
7454 /* Ensure the second source is not overwritten. */
7455 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
7456 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
7457 else
7458 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
7460 else if (code1 == CONST_VECTOR)
7462 HOST_WIDE_INT hint = 0;
7464 switch (GET_MODE (operands[1]))
7466 case V2SImode:
7467 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
7468 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
7469 break;
7471 case V4HImode:
7472 if (BYTES_BIG_ENDIAN)
7474 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7475 hint <<= 16;
7476 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7478 else
7480 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7481 hint <<= 16;
7482 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7485 otherops[1] = GEN_INT (hint);
7486 hint = 0;
7488 if (BYTES_BIG_ENDIAN)
7490 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7491 hint <<= 16;
7492 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7494 else
7496 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7497 hint <<= 16;
7498 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7501 operands[1] = GEN_INT (hint);
7502 break;
7504 case V8QImode:
7505 if (BYTES_BIG_ENDIAN)
7507 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7508 hint <<= 8;
7509 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7510 hint <<= 8;
7511 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7512 hint <<= 8;
7513 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7515 else
7517 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
7518 hint <<= 8;
7519 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
7520 hint <<= 8;
7521 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
7522 hint <<= 8;
7523 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
7526 otherops[1] = GEN_INT (hint);
7527 hint = 0;
7529 if (BYTES_BIG_ENDIAN)
7531 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7532 hint <<= 8;
7533 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7534 hint <<= 8;
7535 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7536 hint <<= 8;
7537 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7539 else
7541 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
7542 hint <<= 8;
7543 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
7544 hint <<= 8;
7545 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
7546 hint <<= 8;
7547 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
7550 operands[1] = GEN_INT (hint);
7551 break;
7553 default:
7554 abort ();
7556 output_mov_immediate (operands);
7557 output_mov_immediate (otherops);
7559 else if (code1 == CONST_DOUBLE)
7561 if (GET_MODE (operands[1]) == DFmode)
7563 REAL_VALUE_TYPE r;
7564 long l[2];
7566 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
7567 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
7568 otherops[1] = GEN_INT (l[1]);
7569 operands[1] = GEN_INT (l[0]);
7571 else if (GET_MODE (operands[1]) != VOIDmode)
7572 abort ();
7573 else if (WORDS_BIG_ENDIAN)
7575 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7576 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7578 else
7580 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
7581 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
7584 output_mov_immediate (operands);
7585 output_mov_immediate (otherops);
7587 else if (code1 == CONST_INT)
7589 #if HOST_BITS_PER_WIDE_INT > 32
7590 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
7591 what the upper word is. */
7592 if (WORDS_BIG_ENDIAN)
7594 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7595 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7597 else
7599 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
7600 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
7602 #else
7603 /* Sign extend the intval into the high-order word. */
7604 if (WORDS_BIG_ENDIAN)
7606 otherops[1] = operands[1];
7607 operands[1] = (INTVAL (operands[1]) < 0
7608 ? constm1_rtx : const0_rtx);
7610 else
7611 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
7612 #endif
7613 output_mov_immediate (otherops);
7614 output_mov_immediate (operands);
7616 else if (code1 == MEM)
7618 switch (GET_CODE (XEXP (operands[1], 0)))
7620 case REG:
7621 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
7622 break;
7624 case PRE_INC:
7625 abort (); /* Should never happen now. */
7626 break;
7628 case PRE_DEC:
7629 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
7630 break;
7632 case POST_INC:
7633 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
7634 break;
7636 case POST_DEC:
7637 abort (); /* Should never happen now. */
7638 break;
7640 case LABEL_REF:
7641 case CONST:
7642 output_asm_insn ("adr%?\t%0, %1", operands);
7643 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
7644 break;
7646 default:
7647 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
7648 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
7650 otherops[0] = operands[0];
7651 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
7652 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
7654 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
7656 if (GET_CODE (otherops[2]) == CONST_INT)
7658 switch ((int) INTVAL (otherops[2]))
7660 case -8:
7661 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
7662 return "";
7663 case -4:
7664 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
7665 return "";
7666 case 4:
7667 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
7668 return "";
7671 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
7672 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
7673 else
7674 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7676 else
7677 output_asm_insn ("add%?\t%0, %1, %2", otherops);
7679 else
7680 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
7682 return "ldm%?ia\t%0, %M0";
7684 else
7686 otherops[1] = adjust_address (operands[1], SImode, 4);
7687 /* Take care of overlapping base/data reg. */
7688 if (reg_mentioned_p (operands[0], operands[1]))
7690 output_asm_insn ("ldr%?\t%0, %1", otherops);
7691 output_asm_insn ("ldr%?\t%0, %1", operands);
7693 else
7695 output_asm_insn ("ldr%?\t%0, %1", operands);
7696 output_asm_insn ("ldr%?\t%0, %1", otherops);
7701 else
7702 abort (); /* Constraints should prevent this. */
7704 else if (code0 == MEM && code1 == REG)
7706 if (REGNO (operands[1]) == IP_REGNUM)
7707 abort ();
7709 switch (GET_CODE (XEXP (operands[0], 0)))
7711 case REG:
7712 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
7713 break;
7715 case PRE_INC:
7716 abort (); /* Should never happen now. */
7717 break;
7719 case PRE_DEC:
7720 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
7721 break;
7723 case POST_INC:
7724 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
7725 break;
7727 case POST_DEC:
7728 abort (); /* Should never happen now. */
7729 break;
7731 case PLUS:
7732 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
7734 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
7736 case -8:
7737 output_asm_insn ("stm%?db\t%m0, %M1", operands);
7738 return "";
7740 case -4:
7741 output_asm_insn ("stm%?da\t%m0, %M1", operands);
7742 return "";
7744 case 4:
7745 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
7746 return "";
7749 /* Fall through */
7751 default:
7752 otherops[0] = adjust_address (operands[0], SImode, 4);
7753 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
7754 output_asm_insn ("str%?\t%1, %0", operands);
7755 output_asm_insn ("str%?\t%1, %0", otherops);
7758 else
7759 /* Constraints should prevent this. */
7760 abort ();
7762 return "";
7766 /* Output an arbitrary MOV reg, #n.
7767 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
7768 const char *
7769 output_mov_immediate (rtx *operands)
7771 HOST_WIDE_INT n = INTVAL (operands[1]);
7773 /* Try to use one MOV. */
7774 if (const_ok_for_arm (n))
7775 output_asm_insn ("mov%?\t%0, %1", operands);
7777 /* Try to use one MVN. */
7778 else if (const_ok_for_arm (~n))
7780 operands[1] = GEN_INT (~n);
7781 output_asm_insn ("mvn%?\t%0, %1", operands);
7783 else
7785 int n_ones = 0;
7786 int i;
7788 /* If all else fails, make it out of ORRs or BICs as appropriate. */
7789 for (i = 0; i < 32; i++)
7790 if (n & 1 << i)
7791 n_ones++;
7793 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
7794 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
7795 else
7796 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
7799 return "";
7802 /* Output an ADD r, s, #n where n may be too big for one instruction.
7803 If n is zero and r and s are the same register, output nothing. */
7804 const char *
7805 output_add_immediate (rtx *operands)
7807 HOST_WIDE_INT n = INTVAL (operands[2]);
7809 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
7811 if (n < 0)
7812 output_multi_immediate (operands,
7813 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
7814 -n);
7815 else
7816 output_multi_immediate (operands,
7817 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
7818 n);
7821 return "";
7824 /* Output a multiple immediate operation.
7825 OPERANDS is the vector of operands referred to in the output patterns.
7826 INSTR1 is the output pattern to use for the first constant.
7827 INSTR2 is the output pattern to use for subsequent constants.
7828 IMMED_OP is the index of the constant slot in OPERANDS.
7829 N is the constant value. */
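/* A sketch of the expansion (not verbatim compiler output): with
   N = 0x12345678 and the mov/orr patterns from output_mov_immediate,
   the loop below picks out 8-bit chunks at even bit positions and
   emits

       mov rd, #0x278
       orr rd, rd, #0x5400
       orr rd, rd, #0x2340000
       orr rd, rd, #0x10000000

   where each chunk is a valid ARM immediate: 8 bits rotated right by
   an even amount.  */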
7830 static const char *
7831 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
7832 int immed_op, HOST_WIDE_INT n)
7834 #if HOST_BITS_PER_WIDE_INT > 32
7835 n &= 0xffffffff;
7836 #endif
7838 if (n == 0)
7840 /* Quick and easy output. */
7841 operands[immed_op] = const0_rtx;
7842 output_asm_insn (instr1, operands);
7844 else
7846 int i;
7847 const char * instr = instr1;
7849 /* Note that n is never zero here (which would give no output). */
7850 for (i = 0; i < 32; i += 2)
7852 if (n & (3 << i))
7854 operands[immed_op] = GEN_INT (n & (255 << i));
7855 output_asm_insn (instr, operands);
7856 instr = instr2;
7857 i += 6;
7862 return "";
7865 /* Return the appropriate ARM instruction for the operation code.
7866 The returned result should not be overwritten. OP is the rtx of the
7867 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
7868 was shifted. */
7869 const char *
7870 arithmetic_instr (rtx op, int shift_first_arg)
7872 switch (GET_CODE (op))
7874 case PLUS:
7875 return "add";
7877 case MINUS:
7878 return shift_first_arg ? "rsb" : "sub";
7880 case IOR:
7881 return "orr";
7883 case XOR:
7884 return "eor";
7886 case AND:
7887 return "and";
7889 default:
7890 abort ();
7894 /* Ensure valid constant shifts and return the appropriate shift mnemonic
7895 for the operation code. The returned result should not be overwritten.
7896 OP is the rtx code of the shift.
7897 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
7898 constant shift amount otherwise. */
7899 static const char *
7900 shift_op (rtx op, HOST_WIDE_INT *amountp)
7902 const char * mnem;
7903 enum rtx_code code = GET_CODE (op);
7905 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
7906 *amountp = -1;
7907 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
7908 *amountp = INTVAL (XEXP (op, 1));
7909 else
7910 abort ();
7912 switch (code)
7914 case ASHIFT:
7915 mnem = "asl";
7916 break;
7918 case ASHIFTRT:
7919 mnem = "asr";
7920 break;
7922 case LSHIFTRT:
7923 mnem = "lsr";
7924 break;
7926 case ROTATERT:
7927 mnem = "ror";
7928 break;
7930 case MULT:
7931 /* We never have to worry about the amount being other than a
7932 power of 2, since this case can never be reloaded from a reg. */
7933 if (*amountp != -1)
7934 *amountp = int_log2 (*amountp);
7935 else
7936 abort ();
7937 return "asl";
7939 default:
7940 abort ();
7943 if (*amountp != -1)
7945 /* This is not 100% correct, but follows from the desire to merge
7946 multiplication by a power of 2 with the recognizer for a
7947 shift. >=32 is not a valid shift for "asl", so we must try and
7948 output a shift that produces the correct arithmetical result.
7949 Using lsr #32 is identical except for the fact that the carry bit
7950 is not set correctly if we set the flags; but we never use the
7951 carry bit from such an operation, so we can ignore that. */
7952 if (code == ROTATERT)
7953 /* Rotate is just modulo 32. */
7954 *amountp &= 31;
7955 else if (*amountp != (*amountp & 31))
7957 if (code == ASHIFT)
7958 mnem = "lsr";
7959 *amountp = 32;
7962 /* Shifts of 0 are no-ops. */
7963 if (*amountp == 0)
7964 return NULL;
7967 return mnem;
7970 /* Obtain the shift count from POWER, which must be a power of two. */
7972 static HOST_WIDE_INT
7973 int_log2 (HOST_WIDE_INT power)
7975 HOST_WIDE_INT shift = 0;
7977 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
7979 if (shift > 31)
7980 abort ();
7981 shift++;
7984 return shift;
7987 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
7988 /bin/as is horribly restrictive. */
7989 #define MAX_ASCII_LEN 51
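/* The effect is that a long string is split across several .ascii
   directives, each at most MAX_ASCII_LEN characters of output, with
   non-printable characters emitted as escapes such as \t or \003.  */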
7991 void
7992 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
7994 int i;
7995 int len_so_far = 0;
7997 fputs ("\t.ascii\t\"", stream);
7999 for (i = 0; i < len; i++)
8001 int c = p[i];
8003 if (len_so_far >= MAX_ASCII_LEN)
8005 fputs ("\"\n\t.ascii\t\"", stream);
8006 len_so_far = 0;
8009 switch (c)
8011 case TARGET_TAB:
8012 fputs ("\\t", stream);
8013 len_so_far += 2;
8014 break;
8016 case TARGET_FF:
8017 fputs ("\\f", stream);
8018 len_so_far += 2;
8019 break;
8021 case TARGET_BS:
8022 fputs ("\\b", stream);
8023 len_so_far += 2;
8024 break;
8026 case TARGET_CR:
8027 fputs ("\\r", stream);
8028 len_so_far += 2;
8029 break;
8031 case TARGET_NEWLINE:
8032 fputs ("\\n", stream);
8033 c = p [i + 1];
8034 if ((c >= ' ' && c <= '~')
8035 || c == TARGET_TAB)
8036 /* This is a good place for a line break. */
8037 len_so_far = MAX_ASCII_LEN;
8038 else
8039 len_so_far += 2;
8040 break;
8042 case '\"':
8043 case '\\':
8044 putc ('\\', stream);
8045 len_so_far++;
8046 /* Drop through. */
8048 default:
8049 if (c >= ' ' && c <= '~')
8051 putc (c, stream);
8052 len_so_far++;
8054 else
8056 fprintf (stream, "\\%03o", c);
8057 len_so_far += 4;
8059 break;
8063 fputs ("\"\n", stream);
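/* Example output (added; not in the original source): for the
   three bytes 'a', TAB, NUL this function emits

	.ascii	"a\t\000"

   and a run of more than MAX_ASCII_LEN printable characters is split
   across several .ascii directives.  */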
/* Compute the register save mask for registers 0 through 12
   inclusive.  This code is used by both arm_compute_save_reg_mask
   and arm_compute_initial_elimination_offset.  */
static unsigned long
arm_compute_save_reg0_reg12_mask (void)
{
  unsigned long func_type = arm_current_func_type ();
  unsigned int save_reg_mask = 0;
  unsigned int reg;

  if (IS_INTERRUPT (func_type))
    {
      unsigned int max_reg;
      /* Interrupt functions must not corrupt any registers,
         even call clobbered ones.  If this is a leaf function
         we can just examine the registers used by the RTL, but
         otherwise we have to assume that whatever function is
         called might clobber anything, and so we have to save
         all the call-clobbered registers as well.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
        /* FIQ handlers have registers r8 - r12 banked, so
           we only need to check r0 - r7.  Normal ISRs only
           bank r14 and r15, so we must check up to r12.
           r13 is the stack pointer which is always preserved,
           so we do not need to consider it here.  */
        max_reg = 7;
      else
        max_reg = 12;

      for (reg = 0; reg <= max_reg; reg++)
        if (regs_ever_live[reg]
            || (! current_function_is_leaf && call_used_regs [reg]))
          save_reg_mask |= (1 << reg);
    }
  else
    {
      /* In the normal case we only need to save those registers
         which are call saved and which are used by this function.  */
      for (reg = 0; reg <= 10; reg++)
        if (regs_ever_live[reg] && ! call_used_regs [reg])
          save_reg_mask |= (1 << reg);

      /* Handle the frame pointer as a special case.  */
      if (! TARGET_APCS_FRAME
          && ! frame_pointer_needed
          && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
          && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
        save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;

      /* If we aren't loading the PIC register,
         don't stack it even though it may be live.  */
      if (flag_pic
          && ! TARGET_SINGLE_PIC_BASE
          && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
    }

  return save_reg_mask;
}
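/* Worked example (added; not in the original source): a normal
   (non-interrupt) function that uses the call-saved registers r4 and
   r6 gets (1 << 4) | (1 << 6) == 0x50 from the loop above; the frame
   pointer and PIC register bits are OR-ed in only when the conditions
   checked above hold.  */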
/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.  */

static unsigned long
arm_compute_save_reg_mask (void)
{
  unsigned int save_reg_mask = 0;
  unsigned long func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* This should never really happen.  */
    return 0;

  /* If we are creating a stack frame, then we must save the frame pointer,
     IP (which will hold the old stack pointer), LR and the PC.  */
  if (frame_pointer_needed)
    save_reg_mask |=
      (1 << ARM_HARD_FRAME_POINTER_REGNUM)
      | (1 << IP_REGNUM)
      | (1 << LR_REGNUM)
      | (1 << PC_REGNUM);

  /* Volatile functions do not return, so there
     is no need to save any other registers.  */
  if (IS_VOLATILE (func_type))
    return save_reg_mask;

  save_reg_mask |= arm_compute_save_reg0_reg12_mask ();

  /* Decide if we need to save the link register.
     Interrupt routines have their own banked link register,
     so they never need to save it.
     Otherwise if we do not use the link register we do not need to save
     it.  If we are pushing other registers onto the stack however, we
     can save an instruction in the epilogue by pushing the link register
     now and then popping it back into the PC.  This incurs extra memory
     accesses though, so we only do it when optimizing for size, and only
     if we know that we will not need a fancy return sequence.  */
  if (regs_ever_live [LR_REGNUM]
      || (save_reg_mask
          && optimize_size
          && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
    save_reg_mask |= 1 << LR_REGNUM;

  if (cfun->machine->lr_save_eliminated)
    save_reg_mask &= ~ (1 << LR_REGNUM);

  if (TARGET_REALLY_IWMMXT
      && ((bit_count (save_reg_mask)
           + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
    {
      unsigned int reg;

      /* The total number of registers that are going to be pushed
         onto the stack is odd.  We need to ensure that the stack
         is 64-bit aligned before we start to save iWMMXt registers,
         and also before we start to create locals.  (A local variable
         might be a double or long long which we will load/store using
         an iWMMXt instruction).  Therefore we need to push another
         ARM register, so that the stack will be 64-bit aligned.  We
         try to avoid using the arg registers (r0 - r3) as they might be
         used to pass values in a tail call.  */
      for (reg = 4; reg <= 12; reg++)
        if ((save_reg_mask & (1 << reg)) == 0)
          break;

      if (reg <= 12)
        save_reg_mask |= (1 << reg);
      else
        {
          cfun->machine->sibcall_blocked = 1;
          save_reg_mask |= (1 << 3);
        }
    }

  return save_reg_mask;
}
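/* Worked example (added; not in the original source): for a normal
   function that needs a stack frame and uses r4, the mask built above
   is (1 << 4) | (1 << 11) | (1 << 12) | (1 << 14) | (1 << 15), i.e.
   {r4, fp, ip, lr, pc}, given the usual ARM numbering of fp == 11,
   ip == 12, lr == 14 and pc == 15.  */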
/* Generate a function exit sequence.  If REALLY_RETURN is false, then do
   everything bar the final return instruction.  */
const char *
output_return_instruction (rtx operand, int really_return, int reverse)
{
  char conditional[10];
  char instr[100];
  int reg;
  unsigned long live_regs_mask;
  unsigned long func_type;

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      /* If this function was declared non-returning, and we have
         found a tail call, then we have to trust that the called
         function won't return.  */
      if (really_return)
        {
          rtx ops[2];

          /* Otherwise, trap an attempted return by aborting.  */
          ops[0] = operand;
          ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
                                       : "abort");
          assemble_external_libcall (ops[1]);
          output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
        }

      return "";
    }

  if (current_function_calls_alloca && !really_return)
    abort ();

  sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');

  return_used_this_function = 1;

  live_regs_mask = arm_compute_save_reg_mask ();

  if (live_regs_mask)
    {
      const char * return_reg;

      /* If we do not have any special requirements for function exit
         (eg interworking, or ISR) then we can load the return address
         directly into the PC.  Otherwise we must load it into LR.  */
      if (really_return
          && ! TARGET_INTERWORK)
        return_reg = reg_names[PC_REGNUM];
      else
        return_reg = reg_names[LR_REGNUM];

      if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
        {
          /* There are three possible reasons for the IP register
             being saved.  1) a stack frame was created, in which case
             IP contains the old stack pointer, or 2) an ISR routine
             corrupted it, or 3) it was saved to align the stack on
             iWMMXt.  In case 1, restore IP into SP, otherwise just
             restore IP.  */
          if (frame_pointer_needed)
            {
              live_regs_mask &= ~ (1 << IP_REGNUM);
              live_regs_mask |= (1 << SP_REGNUM);
            }
          else
            {
              if (! IS_INTERRUPT (func_type)
                  && ! TARGET_REALLY_IWMMXT)
                abort ();
            }
        }

      /* On some ARM architectures it is faster to use LDR rather than
         LDM to load a single register.  On other architectures, the
         cost is the same.  In 26 bit mode, or for exception handlers,
         we have to use LDM to load the PC so that the CPSR is also
         restored.  */
      for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
        if (live_regs_mask == (unsigned int)(1 << reg))
          break;

      if (reg <= LAST_ARM_REGNUM
          && (reg != LR_REGNUM
              || ! really_return
              || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
        {
          sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
                   (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
        }
      else
        {
          char *p;
          int first = 1;

          /* Generate the load multiple instruction to restore the
             registers.  Note we can get here, even if
             frame_pointer_needed is true, but only if sp already
             points to the base of the saved core registers.  */
          if (live_regs_mask & (1 << SP_REGNUM))
            {
              unsigned HOST_WIDE_INT stack_adjust =
                arm_get_frame_size () + current_function_outgoing_args_size;

              if (stack_adjust != 0 && stack_adjust != 4)
                abort ();

              if (stack_adjust && arm_arch5)
                sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
              else
                {
                  /* If we can't use ldmib (SA110 bug), then try to pop r3
                     instead.  */
                  if (stack_adjust)
                    live_regs_mask |= 1 << 3;
                  sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
                }
            }
          else
            sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);

          p = instr + strlen (instr);

          for (reg = 0; reg <= SP_REGNUM; reg++)
            if (live_regs_mask & (1 << reg))
              {
                int l = strlen (reg_names[reg]);

                if (first)
                  first = 0;
                else
                  {
                    memcpy (p, ", ", 2);
                    p += 2;
                  }

                memcpy (p, "%|", 2);
                memcpy (p + 2, reg_names[reg], l);
                p += l + 2;
              }

          if (live_regs_mask & (1 << LR_REGNUM))
            {
              sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
              /* Decide if we need to add the ^ symbol to the end of the
                 register list.  This causes the saved condition codes
                 register to be copied into the current condition codes
                 register.  We do the copy if we are conforming to the 32-bit
                 ABI and this is an interrupt function, or if we are
                 conforming to the 26-bit ABI.  There is a special case for
                 the 26-bit ABI however, which is if we are writing back the
                 stack pointer but not loading the PC.  In this case adding
                 the ^ symbol would create a type 2 LDM instruction, where
                 writeback is UNPREDICTABLE.  We are safe in leaving the ^
                 character off in this case however, since the actual return
                 instruction will be a MOVS which will restore the CPSR.  */
              if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
                  || (! TARGET_APCS_32 && really_return))
                strcat (p, "^");
            }
          else
            strcpy (p, "}");
        }

      output_asm_insn (instr, & operand);

      /* See if we need to generate an extra instruction to
         perform the actual function return.  */
      if (really_return
          && func_type != ARM_FT_INTERWORKED
          && (live_regs_mask & (1 << LR_REGNUM)) != 0)
        {
          /* The return has already been handled
             by loading the LR into the PC.  */
          really_return = 0;
        }
    }

  if (really_return)
    {
      switch ((int) ARM_FUNC_TYPE (func_type))
        {
        case ARM_FT_ISR:
        case ARM_FT_FIQ:
          sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
          break;

        case ARM_FT_INTERWORKED:
          sprintf (instr, "bx%s\t%%|lr", conditional);
          break;

        case ARM_FT_EXCEPTION:
          sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
          break;

        default:
          /* ARMv5 implementations always provide BX, so interworking
             is the default unless APCS-26 is in use.  */
          if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
            sprintf (instr, "bx%s\t%%|lr", conditional);
          else
            sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
                     conditional, TARGET_APCS_32 ? "" : "s");
          break;
        }

      output_asm_insn (instr, & operand);
    }

  return "";
}
/* Write the function name into the code section, directly preceding
   the function prologue.

   Code will be output similar to this:
     t0
	 .ascii "arm_poke_function_name", 0
	 .align
     t1
	 .word 0xff000000 + (t1 - t0)
     arm_poke_function_name
	 mov     ip, sp
	 stmfd   sp!, {fp, ip, lr, pc}
	 sub     fp, ip, #4

   When performing a stack backtrace, code can inspect the value
   of 'pc' stored at 'fp' + 0.  If the trace function then looks
   at location pc - 12 and the top 8 bits are set, then we know
   that there is a function name embedded immediately preceding this
   location and has length ((pc[-3]) & 0xff000000).

   We assume that pc is declared as a pointer to an unsigned long.

   It is of no benefit to output the function name if we are assembling
   a leaf function.  These function types will not contain a stack
   backtrace structure, therefore it is not possible to determine the
   function name.  */
void
arm_poke_function_name (FILE *stream, const char *name)
{
  unsigned long alignlength;
  unsigned long length;
  rtx x;

  length = strlen (name) + 1;
  alignlength = ROUND_UP_WORD (length);

  ASM_OUTPUT_ASCII (stream, name, length);
  ASM_OUTPUT_ALIGN (stream, 2);
  x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
  assemble_aligned_integer (UNITS_PER_WORD, x);
}
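/* Worked example (added; not in the original source): for NAME "fact"
   the length including the NUL is 5, ROUND_UP_WORD gives 8, and the
   marker word emitted is 0xff000008; the top byte flags the record
   and the low bits tell the backtrace code how far back the name
   string starts.  */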
/* Place some comments into the assembler stream
   describing the current function.  */
static void
arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
{
  unsigned long func_type;

  if (!TARGET_ARM)
    {
      thumb_output_function_prologue (f, frame_size);
      return;
    }

  /* Sanity check.  */
  if (arm_ccfsm_state || arm_target_insn)
    abort ();

  func_type = arm_current_func_type ();

  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    default:
    case ARM_FT_NORMAL:
      break;
    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\t%@ Function supports interworking.\n");
      break;
    case ARM_FT_EXCEPTION_HANDLER:
      asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
      break;
    case ARM_FT_ISR:
      asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
      break;
    case ARM_FT_FIQ:
      asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
      break;
    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
      break;
    }

  if (IS_NAKED (func_type))
    asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");

  if (IS_VOLATILE (func_type))
    asm_fprintf (f, "\t%@ Volatile: function does not return.\n");

  if (IS_NESTED (func_type))
    asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");

  asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
               current_function_args_size,
               current_function_pretend_args_size, frame_size);

  asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
               frame_pointer_needed,
               cfun->machine->uses_anonymous_args);

  if (cfun->machine->lr_save_eliminated)
    asm_fprintf (f, "\t%@ link register save eliminated.\n");

#ifdef AOF_ASSEMBLER
  if (flag_pic)
    asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
#endif

  return_used_this_function = 0;
}
const char *
arm_output_epilogue (rtx sibling)
{
  int reg;
  unsigned long saved_regs_mask;
  unsigned long func_type;
  /* Floats_offset is the offset from the "virtual" frame.  In an APCS
     frame that is $fp + 4 for a non-variadic function.  */
  int floats_offset = 0;
  rtx operands[3];
  int frame_size = arm_get_frame_size ();
  FILE * f = asm_out_file;
  rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
  unsigned int lrm_count = 0;
  int really_return = (sibling == NULL);

  /* If we have already generated the return instruction
     then it is futile to generate anything else.  */
  if (use_return_insn (FALSE, sibling) && return_used_this_function)
    return "";

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* Naked functions don't have epilogues.  */
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      rtx op;

      /* A volatile function should never return.  Call abort.  */
      op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
      assemble_external_libcall (op);
      output_asm_insn ("bl\t%a0", &op);

      return "";
    }

  if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      && ! really_return)
    /* If we are throwing an exception, then we really must
       be doing a return, so we can't tail-call.  */
    abort ();

  saved_regs_mask = arm_compute_save_reg_mask ();

  if (TARGET_IWMMXT)
    lrm_count = bit_count (saved_regs_mask);

  /* XXX We should adjust floats_offset for any anonymous args, and then
     re-adjust vfp_offset below to compensate.  */

  /* Compute how far away the floats will be.  */
  for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
    if (saved_regs_mask & (1 << reg))
      floats_offset += 4;

  if (frame_pointer_needed)
    {
      int vfp_offset = 4;

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
        {
          for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
            if (regs_ever_live[reg] && !call_used_regs[reg])
              {
                floats_offset += 12;
                asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
                             reg, FP_REGNUM, floats_offset - vfp_offset);
              }
        }
      else
        {
          int start_reg = LAST_ARM_FP_REGNUM;

          for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
            {
              if (regs_ever_live[reg] && !call_used_regs[reg])
                {
                  floats_offset += 12;

                  /* We can't unstack more than four registers at once.  */
                  if (start_reg - reg == 3)
                    {
                      asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
                                   reg, FP_REGNUM, floats_offset - vfp_offset);
                      start_reg = reg - 1;
                    }
                }
              else
                {
                  if (reg != start_reg)
                    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
                                 reg + 1, start_reg - reg,
                                 FP_REGNUM, floats_offset - vfp_offset);
                  start_reg = reg - 1;
                }
            }

          /* Just in case the last register checked also needs unstacking.  */
          if (reg != start_reg)
            asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
                         reg + 1, start_reg - reg,
                         FP_REGNUM, floats_offset - vfp_offset);
        }

      if (TARGET_IWMMXT)
        {
          /* The frame pointer is guaranteed to be non-double-word aligned.
             This is because it is set to (old_stack_pointer - 4) and the
             old_stack_pointer was double word aligned.  Thus the offset to
             the iWMMXt registers to be loaded must also be non-double-word
             sized, so that the resultant address *is* double-word aligned.
             We can ignore floats_offset since that was already included in
             the live_regs_mask.  */
          lrm_count += (lrm_count % 2 ? 2 : 1);

          for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
            if (regs_ever_live[reg] && !call_used_regs[reg])
              {
                asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
                             reg, FP_REGNUM, lrm_count * 4);
                lrm_count += 2;
              }
        }

      /* saved_regs_mask should contain the IP, which at the time of stack
         frame generation actually contains the old stack pointer.  So a
         quick way to unwind the stack is just pop the IP register directly
         into the stack pointer.  */
      if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
        abort ();
      saved_regs_mask &= ~ (1 << IP_REGNUM);
      saved_regs_mask |= (1 << SP_REGNUM);

      /* There are two registers left in saved_regs_mask - LR and PC.  We
         only need to restore the LR register (the return address), but to
         save time we can load it directly into the PC, unless we need a
         special function exit sequence, or we are not really returning.  */
      if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
        /* Delete the LR from the register mask, so that the LR on
           the stack is loaded into the PC in the register mask.  */
        saved_regs_mask &= ~ (1 << LR_REGNUM);
      else
        saved_regs_mask &= ~ (1 << PC_REGNUM);

      /* We must use SP as the base register, because SP is one of the
         registers being restored.  If an interrupt or page fault
         happens in the ldm instruction, the SP might or might not
         have been restored.  That would be bad, as then SP will no
         longer indicate the safe area of stack, and we can get stack
         corruption.  Using SP as the base register means that it will
         be reset correctly to the original value, should an interrupt
         occur.  If the stack pointer already points at the right
         place, then omit the subtraction.  */
      if (((frame_size + current_function_outgoing_args_size + floats_offset)
           != 4 * (1 + (int) bit_count (saved_regs_mask)))
          || current_function_calls_alloca)
        asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
                     4 * bit_count (saved_regs_mask));
      print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);

      if (IS_INTERRUPT (func_type))
        /* Interrupt handlers will have pushed the
           IP onto the stack, so restore it now.  */
        print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
    }
  else
    {
      /* Restore stack pointer if necessary.  */
      if (frame_size + current_function_outgoing_args_size != 0)
        {
          operands[0] = operands[1] = stack_pointer_rtx;
          operands[2] = GEN_INT (frame_size
                                 + current_function_outgoing_args_size);
          output_add_immediate (operands);
        }

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
        {
          for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
            if (regs_ever_live[reg] && !call_used_regs[reg])
              asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
                           reg, SP_REGNUM);
        }
      else
        {
          int start_reg = FIRST_ARM_FP_REGNUM;

          for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
            {
              if (regs_ever_live[reg] && !call_used_regs[reg])
                {
                  if (reg - start_reg == 3)
                    {
                      asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
                                   start_reg, SP_REGNUM);
                      start_reg = reg + 1;
                    }
                }
              else
                {
                  if (reg != start_reg)
                    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
                                 start_reg, reg - start_reg,
                                 SP_REGNUM);

                  start_reg = reg + 1;
                }
            }

          /* Just in case the last register checked also needs unstacking.  */
          if (reg != start_reg)
            asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
                         start_reg, reg - start_reg, SP_REGNUM);
        }

      if (TARGET_IWMMXT)
        for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
          if (regs_ever_live[reg] && !call_used_regs[reg])
            asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);

      /* If we can, restore the LR into the PC.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
          && really_return
          && current_function_pretend_args_size == 0
          && saved_regs_mask & (1 << LR_REGNUM))
        {
          saved_regs_mask &= ~ (1 << LR_REGNUM);
          saved_regs_mask |= (1 << PC_REGNUM);
        }

      /* Load the registers off the stack.  If we only have one register
         to load use the LDR instruction - it is faster.  */
      if (saved_regs_mask == (1 << LR_REGNUM))
        {
          /* The exception handler ignores the LR, so we do
             not really need to load it off the stack.  */
          if (eh_ofs)
            asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
          else
            asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
        }
      else if (saved_regs_mask)
        {
          if (saved_regs_mask & (1 << SP_REGNUM))
            /* Note - write back to the stack register is not enabled
               (ie "ldmfd sp!...").  We know that the stack pointer is
               in the list of registers and if we add writeback the
               instruction becomes UNPREDICTABLE.  */
            print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
          else
            print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
        }

      if (current_function_pretend_args_size)
        {
          /* Unwind the pre-pushed regs.  */
          operands[0] = operands[1] = stack_pointer_rtx;
          operands[2] = GEN_INT (current_function_pretend_args_size);
          output_add_immediate (operands);
        }
    }

  if (! really_return
      || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
          && current_function_pretend_args_size == 0
          && saved_regs_mask & (1 << PC_REGNUM)))
    return "";

  /* Generate the return instruction.  */
  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    case ARM_FT_EXCEPTION_HANDLER:
      /* Even in 26-bit mode we do a mov (rather than a movs)
         because we don't have the PSR bits set in the address.  */
      asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
      break;

    case ARM_FT_ISR:
    case ARM_FT_FIQ:
      asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
      break;

    default:
      if (frame_pointer_needed)
        /* If we used the frame pointer then the return address
           will have been loaded off the stack directly into the
           PC, so there is no need to issue a MOV instruction
           here.  */
        ;
      else if (current_function_pretend_args_size == 0
               && (saved_regs_mask & (1 << LR_REGNUM)))
        /* Similarly we may have been able to load LR into the PC
           even if we did not create a stack frame.  */
        ;
      else if (TARGET_APCS_32)
        asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      else
        asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;
    }

  return "";
}
static void
arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                              HOST_WIDE_INT frame_size)
{
  if (TARGET_THUMB)
    {
      /* ??? Probably not safe to set this here, since it assumes that a
         function will be emitted as assembly immediately after we generate
         RTL for it.  This does not happen for inline functions.  */
      return_used_this_function = 0;
    }
  else
    {
      /* We need to take into account any stack-frame rounding.  */
      frame_size = arm_get_frame_size ();

      if (use_return_insn (FALSE, NULL)
          && return_used_this_function
          && (frame_size + current_function_outgoing_args_size) != 0
          && !frame_pointer_needed)
        abort ();

      /* Reset the ARM-specific per-function variables.  */
      after_arm_reorg = 0;
    }
}
/* Generate and emit an insn that we will recognize as a push_multi.
   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */
static rtx
emit_multi_reg_push (int mask)
{
  int num_regs = 0;
  int num_dwarf_regs;
  int i, j;
  rtx par;
  rtx dwarf;
  int dwarf_par_index;
  rtx tmp, reg;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))
      num_regs++;

  if (num_regs == 0 || num_regs > 16)
    abort ();

  /* We don't record the PC in the dwarf frame information.  */
  num_dwarf_regs = num_regs;
  if (mask & (1 << PC_REGNUM))
    num_dwarf_regs--;

  /* For the body of the insn we are going to generate an UNSPEC in
     parallel with several USEs.  This allows the insn to be recognized
     by the push_multi pattern in the arm.md file.  The insn looks
     something like this:

       (parallel [
           (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
                (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
           (use (reg:SI 11 fp))
           (use (reg:SI 12 ip))
           (use (reg:SI 14 lr))
           (use (reg:SI 15 pc))
        ])

     For the frame note however, we try to be more explicit and actually
     show each register being stored into the stack frame, plus a (single)
     decrement of the stack pointer.  We do it this way in order to be
     friendly to the stack unwinding code, which only wants to see a single
     stack decrement per instruction.  The RTL we generate for the note looks
     something like this:

      (sequence [
           (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
           (set (mem:SI (reg:SI sp)) (reg:SI r4))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
        ])

      This sequence is used both by the code to support stack unwinding for
      exceptions handlers and the code to generate dwarf2 frame debugging.  */

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
  dwarf_par_index = 1;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    {
      if (mask & (1 << i))
        {
          reg = gen_rtx_REG (SImode, i);

          XVECEXP (par, 0, 0)
            = gen_rtx_SET (VOIDmode,
                           gen_rtx_MEM (BLKmode,
                                        gen_rtx_PRE_DEC (BLKmode,
                                                         stack_pointer_rtx)),
                           gen_rtx_UNSPEC (BLKmode,
                                           gen_rtvec (1, reg),
                                           UNSPEC_PUSH_MULT));

          if (i != PC_REGNUM)
            {
              tmp = gen_rtx_SET (VOIDmode,
                                 gen_rtx_MEM (SImode, stack_pointer_rtx),
                                 reg);
              RTX_FRAME_RELATED_P (tmp) = 1;
              XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
              dwarf_par_index++;
            }

          break;
        }
    }

  for (j = 1, i++; j < num_regs; i++)
    {
      if (mask & (1 << i))
        {
          reg = gen_rtx_REG (SImode, i);

          XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);

          if (i != PC_REGNUM)
            {
              tmp = gen_rtx_SET (VOIDmode,
                                 gen_rtx_MEM (SImode,
                                              plus_constant (stack_pointer_rtx,
                                                             4 * j)),
                                 reg);
              RTX_FRAME_RELATED_P (tmp) = 1;
              XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
            }

          j++;
        }
    }

  par = emit_insn (par);

  tmp = gen_rtx_SET (SImode,
                     stack_pointer_rtx,
                     gen_rtx_PLUS (SImode,
                                   stack_pointer_rtx,
                                   GEN_INT (-4 * num_regs)));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
                                       REG_NOTES (par));
  return par;
}
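/* Worked example (added; not in the original source): calling
   emit_multi_reg_push with MASK == (1 << 4) | (1 << 11) | (1 << 12)
   | (1 << 14) | (1 << 15) pushes {r4, fp, ip, lr, pc} and attaches a
   frame note describing one SP decrement of 20 bytes plus four
   stores -- the PC store is deliberately omitted from the dwarf
   information.  */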
static rtx
emit_sfm (int base_reg, int count)
{
  rtx par;
  rtx dwarf;
  rtx tmp, reg;
  int i;

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));

  reg = gen_rtx_REG (XFmode, base_reg++);

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
                   gen_rtx_MEM (BLKmode,
                                gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
                   gen_rtx_UNSPEC (BLKmode,
                                   gen_rtvec (1, reg),
                                   UNSPEC_PUSH_MULT));
  tmp
    = gen_rtx_SET (VOIDmode,
                   gen_rtx_MEM (XFmode,
                                gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
                   reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, count - 1) = tmp;

  for (i = 1; i < count; i++)
    {
      reg = gen_rtx_REG (XFmode, base_reg++);
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
                         gen_rtx_MEM (XFmode,
                                      gen_rtx_PRE_DEC (BLKmode,
                                                       stack_pointer_rtx)),
                         reg);
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, count - i - 1) = tmp;
    }

  par = emit_insn (par);
  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
                                       REG_NOTES (par));
  return par;
}
/* Compute the distance from register FROM to register TO.
   These can be the arg pointer (26), the soft frame pointer (25),
   the stack pointer (13) or the hard frame pointer (11).
   Typical stack layout looks like this:

       old stack pointer -> |    |
                             ----
                            |    | \
                            |    |   saved arguments for
                            |    |   vararg functions
                            |    | /
                              --
   hard FP & arg pointer -> |    | \
                            |    |   stack
                            |    |   frame
                            |    | /
                              --
                            |    | \
                            |    |   call saved
                            |    |   registers
      soft frame pointer -> |    | /
                              --
                            |    | \
                            |    |   local
                            |    |   variables
                            |    | /
                              --
                            |    | \
                            |    |   outgoing
                            |    |   arguments
   current stack pointer -> |    | /
                              --

  For a given function some or all of these stack components
  may not be needed, giving rise to the possibility of
  eliminating some of the registers.

  The values returned by this function must reflect the behavior
  of arm_expand_prologue() and arm_compute_save_reg_mask().

  The sign of the number returned reflects the direction of stack
  growth, so the values are positive for all eliminations except
  from the soft frame pointer to the hard frame pointer.  */
unsigned int
arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
{
  unsigned int local_vars = arm_get_frame_size ();
  unsigned int outgoing_args = current_function_outgoing_args_size;
  unsigned int stack_frame;
  unsigned int call_saved_registers;
  unsigned long func_type;

  func_type = arm_current_func_type ();

  /* Volatile functions never return, so there is
     no need to save call saved registers.  */
  call_saved_registers = 0;
  if (! IS_VOLATILE (func_type))
    {
      unsigned int reg_mask;
      unsigned int reg;

      /* Make sure that we compute which registers will be saved
         on the stack using the same algorithm that is used by
         the prologue creation code.  */
      reg_mask = arm_compute_save_reg_mask ();

      /* Now count the number of bits set in save_reg_mask.
         If we have already counted the registers in the stack
         frame, do not count them again.  Non call-saved registers
         might be saved in the call-save area of the stack, if
         doing so will preserve the stack's alignment.  Hence we
         must count them here.  For each set bit we need 4 bytes
         of stack space.  */
      if (frame_pointer_needed)
        reg_mask &= 0x07ff;
      call_saved_registers += 4 * bit_count (reg_mask);

      /* If the hard floating point registers are going to be
         used then they must be saved on the stack as well.
         Each register occupies 12 bytes of stack space.  */
      for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
        if (regs_ever_live[reg] && ! call_used_regs[reg])
          call_saved_registers += 12;

      if (TARGET_REALLY_IWMMXT)
        /* Check for the call-saved iWMMXt registers.  */
        for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
          if (regs_ever_live[reg] && ! call_used_regs [reg])
            call_saved_registers += 8;
    }

  /* The stack frame contains 4 registers - the old frame pointer,
     the old stack pointer, the return address and PC of the start
     of the function.  */
  stack_frame = frame_pointer_needed ? 16 : 0;

  /* OK, now we have enough information to compute the distances.
     There must be an entry in these switch tables for each pair
     of registers in ELIMINABLE_REGS, even if some of the entries
     seem to be redundant or useless.  */
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      switch (to)
        {
        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return 0;

        case FRAME_POINTER_REGNUM:
          /* This is the reverse of the soft frame pointer
             to hard frame pointer elimination below.  */
          if (call_saved_registers == 0 && stack_frame == 0)
            return 0;
          return (call_saved_registers + stack_frame - 4);

        case ARM_HARD_FRAME_POINTER_REGNUM:
          /* If there is no stack frame then the hard
             frame pointer and the arg pointer coincide.  */
          if (stack_frame == 0 && call_saved_registers != 0)
            return 0;
          /* FIXME:  Not sure about this.  Maybe we should always return 0 ?  */
          return (frame_pointer_needed
                  && current_function_needs_context
                  && ! cfun->machine->uses_anonymous_args) ? 4 : 0;

        case STACK_POINTER_REGNUM:
          /* If nothing has been pushed on the stack at all
             then this will return -4.  This *is* correct!  */
          return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;

        default:
          abort ();
        }
      break;

    case FRAME_POINTER_REGNUM:
      switch (to)
        {
        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return 0;

        case ARM_HARD_FRAME_POINTER_REGNUM:
          /* The hard frame pointer points to the top entry in the
             stack frame.  The soft frame pointer to the bottom entry
             in the stack frame.  If there is no stack frame at all,
             then they are identical.  */
          if (call_saved_registers == 0 && stack_frame == 0)
            return 0;
          return - (call_saved_registers + stack_frame - 4);

        case STACK_POINTER_REGNUM:
          return local_vars + outgoing_args;

        default:
          abort ();
        }
      break;

    default:
      /* You cannot eliminate from the stack pointer.
         In theory you could eliminate from the hard frame
         pointer to the stack pointer, but this will never
         happen, since if a stack frame is not needed the
         hard frame pointer will never be used.  */
      abort ();
    }
}
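/* Worked example (added; not in the original source): with a stack
   frame (stack_frame == 16), 8 bytes of other call-saved registers,
   24 bytes of locals and no outgoing arguments, eliminating the arg
   pointer to the stack pointer yields 8 + 16 + 24 + 0 - 4 == 44,
   while eliminating it to the soft frame pointer yields
   8 + 16 - 4 == 20.  */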
/* Calculate the size of the stack frame, taking into account any
   padding that is required to ensure stack-alignment.  */
HOST_WIDE_INT
arm_get_frame_size (void)
{
  int regno;

  int base_size = ROUND_UP_WORD (get_frame_size ());
  int entry_size = 0;
  unsigned long func_type = arm_current_func_type ();
  int leaf;

  if (! TARGET_ARM)
    abort ();

  if (! TARGET_ATPCS)
    return base_size;

  /* We need to know if we are a leaf function.  Unfortunately, it
     is possible to be called after start_sequence has been called,
     which causes get_insns to return the insns for the sequence,
     not the function, which will cause leaf_function_p to return
     the incorrect result.

     To work around this, we cache the computed frame size.  This
     works because we will only be calling RTL expanders that need
     to know about leaf functions once reload has completed, and the
     frame size cannot be changed after that time, so we can safely
     use the cached value.  */

  if (reload_completed)
    return cfun->machine->frame_size;

  leaf = leaf_function_p ();

  /* A leaf function does not need any stack alignment if it has nothing
     on the stack.  */
  if (leaf && base_size == 0)
    {
      cfun->machine->frame_size = 0;
      return 0;
    }

  /* We know that SP will be word aligned on entry, and we must
     preserve that condition at any subroutine call.  But those are
     the only constraints.  */

  /* Space for variadic functions.  */
  if (current_function_pretend_args_size)
    entry_size += current_function_pretend_args_size;

  /* Space for saved registers.  */
  entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;

  /* Space for saved FPA registers.  */
  if (! IS_VOLATILE (func_type))
    {
      for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
        if (regs_ever_live[regno] && ! call_used_regs[regno])
          entry_size += 12;
    }

  if (TARGET_REALLY_IWMMXT)
    {
      /* Check for the call-saved iWMMXt registers.  */
      for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
        if (regs_ever_live [regno] && ! call_used_regs [regno])
          entry_size += 8;
    }

  if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
    base_size += 4;
  if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
    abort ();

  cfun->machine->frame_size = base_size;

  return base_size;
}
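/* Worked example (added; not in the original source): under
   TARGET_ATPCS, 16 bytes of locals plus three pushed registers
   (entry_size == 12) totals 28 bytes, which is not a multiple of 8,
   so base_size is padded to 20 and the overall frame becomes
   doubleword aligned.  */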
/* Generate the prologue instructions for entry into an ARM function.  */
void
arm_expand_prologue (void)
{
  int reg;
  rtx amount;
  rtx insn;
  rtx ip_rtx;
  unsigned long live_regs_mask;
  unsigned long func_type;
  int fp_offset = 0;
  int saved_pretend_args = 0;
  unsigned int args_to_push;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))
    return;

  /* Make a copy of c_f_p_a_s as we may need to modify it locally.  */
  args_to_push = current_function_pretend_args_size;

  /* Compute which register we will have to save onto the stack.  */
  live_regs_mask = arm_compute_save_reg_mask ();

  ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);

  if (frame_pointer_needed)
    {
      if (IS_INTERRUPT (func_type))
        {
          /* Interrupt functions must not corrupt any registers.
             Creating a frame pointer however, corrupts the IP
             register, so we must push it first.  */
          insn = emit_multi_reg_push (1 << IP_REGNUM);

          /* Do not set RTX_FRAME_RELATED_P on this insn.
             The dwarf stack unwinding code only wants to see one
             stack decrement per function, and this is not it.  If
             this instruction is labeled as being part of the frame
             creation sequence then dwarf2out_frame_debug_expr will
             abort when it encounters the assignment of IP to FP
             later on, since the use of SP here establishes SP as
             the CFA register and not IP.

             Anyway this instruction is not really part of the stack
             frame creation although it is part of the prologue.  */
        }
      else if (IS_NESTED (func_type))
        {
          /* The Static chain register is the same as the IP register
             used as a scratch register during stack frame creation.
             To get around this need to find somewhere to store IP
             whilst the frame is being created.  We try the following
             places in order:

               1. The last argument register.
               2. A slot on the stack above the frame.  (This only
                  works if the function is not a varargs function).
               3. Register r3, after pushing the argument registers
                  onto the stack.

             Note - we only need to tell the dwarf2 backend about the SP
             adjustment in the second variant; the static chain register
             doesn't need to be unwound, as it doesn't contain a value
             inherited from the caller.  */

          if (regs_ever_live[3] == 0)
            {
              insn = gen_rtx_REG (SImode, 3);
              insn = gen_rtx_SET (SImode, insn, ip_rtx);
              insn = emit_insn (insn);
            }
          else if (args_to_push == 0)
            {
              rtx dwarf;
              insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
              insn = gen_rtx_MEM (SImode, insn);
              insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
              insn = emit_insn (insn);

              fp_offset = 4;

              /* Just tell the dwarf backend that we adjusted SP.  */
              dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                   gen_rtx_PLUS (SImode, stack_pointer_rtx,
                                                 GEN_INT (-fp_offset)));
              RTX_FRAME_RELATED_P (insn) = 1;
              REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                                    dwarf, REG_NOTES (insn));
            }
          else
            {
              /* Store the args on the stack.  */
              if (cfun->machine->uses_anonymous_args)
                insn = emit_multi_reg_push
                  ((0xf0 >> (args_to_push / 4)) & 0xf);
              else
                insn = emit_insn
                  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (- args_to_push)));

              RTX_FRAME_RELATED_P (insn) = 1;

              saved_pretend_args = 1;
              fp_offset = args_to_push;
              args_to_push = 0;

              /* Now reuse r3 to preserve IP.  */
              insn = gen_rtx_REG (SImode, 3);
              insn = gen_rtx_SET (SImode, insn, ip_rtx);
              (void) emit_insn (insn);
            }
        }

      if (fp_offset)
        {
          insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
          insn = gen_rtx_SET (SImode, ip_rtx, insn);
        }
      else
        insn = gen_movsi (ip_rtx, stack_pointer_rtx);

      insn = emit_insn (insn);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (args_to_push)
    {
      /* Push the argument registers, or reserve space for them.  */
      if (cfun->machine->uses_anonymous_args)
        insn = emit_multi_reg_push
          ((0xf0 >> (args_to_push / 4)) & 0xf);
      else
        insn = emit_insn
          (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                       GEN_INT (- args_to_push)));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* If this is an interrupt service routine, and the link register
     is going to be pushed, and we are not creating a stack frame,
     (which would involve an extra push of IP and a pop in the epilogue)
     subtracting four from LR now will mean that the function return
     can be done with a single instruction.  */
  if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
      && (live_regs_mask & (1 << LR_REGNUM)) != 0
      && ! frame_pointer_needed)
    emit_insn (gen_rtx_SET (SImode,
                            gen_rtx_REG (SImode, LR_REGNUM),
                            gen_rtx_PLUS (SImode,
                                          gen_rtx_REG (SImode, LR_REGNUM),
                                          GEN_INT (-4))));

  if (live_regs_mask)
    {
      insn = emit_multi_reg_push (live_regs_mask);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (TARGET_IWMMXT)
    for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
      if (regs_ever_live[reg] && ! call_used_regs [reg])
        {
          insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
          insn = gen_rtx_MEM (V2SImode, insn);
          insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
                                         gen_rtx_REG (V2SImode, reg)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

  if (! IS_VOLATILE (func_type))
    {
      /* Save any floating point call-saved registers used by this
         function.  */
      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
        {
          for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
            if (regs_ever_live[reg] && !call_used_regs[reg])
              {
                insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
                insn = gen_rtx_MEM (XFmode, insn);
                insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
                                               gen_rtx_REG (XFmode, reg)));
                RTX_FRAME_RELATED_P (insn) = 1;
              }
        }
      else
        {
          int start_reg = LAST_ARM_FP_REGNUM;

          for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
            {
              if (regs_ever_live[reg] && !call_used_regs[reg])
                {
                  if (start_reg - reg == 3)
                    {
                      insn = emit_sfm (reg, 4);
                      RTX_FRAME_RELATED_P (insn) = 1;
                      start_reg = reg - 1;
                    }
                }
              else
                {
                  if (start_reg != reg)
                    {
                      insn = emit_sfm (reg + 1, start_reg - reg);
                      RTX_FRAME_RELATED_P (insn) = 1;
                    }
                  start_reg = reg - 1;
                }
            }

          if (start_reg != reg)
            {
              insn = emit_sfm (reg + 1, start_reg - reg);
              RTX_FRAME_RELATED_P (insn) = 1;
            }
        }
    }

  if (frame_pointer_needed)
    {
      /* Create the new frame pointer.  */
      insn = GEN_INT (-(4 + args_to_push + fp_offset));
      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (IS_NESTED (func_type))
        {
          /* Recover the static chain register.  */
          if (regs_ever_live [3] == 0
              || saved_pretend_args)
            insn = gen_rtx_REG (SImode, 3);
          else /* if (current_function_pretend_args_size == 0) */
            {
              insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
                                   GEN_INT (4));
              insn = gen_rtx_MEM (SImode, insn);
            }

          emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
          /* Add a USE to stop propagate_one_insn() from barfing.  */
          emit_insn (gen_prologue_use (ip_rtx));
        }
    }

  amount = GEN_INT (-(arm_get_frame_size ()
                      + current_function_outgoing_args_size));

  if (amount != const0_rtx)
    {
      /* This add can produce multiple insns for a large constant, so we
         need to get tricky.  */
      rtx last = get_last_insn ();
      insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    amount));
      do
        {
          last = last ? NEXT_INSN (last) : get_insns ();
          RTX_FRAME_RELATED_P (last) = 1;
        }
      while (last != insn);
      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      if (frame_pointer_needed)
        insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
                                         hard_frame_pointer_rtx));
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if the user has requested no
     scheduling in the prolog.  */
  if (current_function_profile || TARGET_NO_SCHED_PRO)
    emit_insn (gen_blockage ());

  /* If the link register is being kept alive, with the return address in it,
     then make sure that it does not get reused by the ce2 pass.  */
  if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
    {
      emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
      cfun->machine->lr_save_eliminated = 1;
    }
}
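/* Example sequence (added; not in the original source): for a normal
   function with a frame pointer and r4 live, the expansion above
   corresponds to the classic APCS prologue

	mov	ip, sp
	stmfd	sp!, {r4, fp, ip, lr, pc}
	sub	fp, ip, #4

   followed by any SP adjustment for locals and outgoing arguments.  */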
/* If CODE is 'd', then the X is a condition operand and the instruction
   should only be executed if the condition is true.
   if CODE is 'D', then the X is a condition operand and the instruction
   should only be executed if the condition is false: however, if the mode
   of the comparison is CCFPEmode, then always execute the instruction -- we
   do this because in these circumstances !GE does not necessarily imply LT;
   in these cases the instruction pattern will take care to make sure that
   an instruction containing %d will follow, thereby undoing the effects of
   doing this instruction unconditionally.
   If CODE is 'N' then X is a floating point operand that must be negated
   before output.
   If CODE is 'B' then output a bitwise inverted value of X (a const int).
   If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
void
arm_print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case '@':
      fputs (ASM_COMMENT_START, stream);
      return;

    case '_':
      fputs (user_label_prefix, stream);
      return;

    case '|':
      fputs (REGISTER_PREFIX, stream);
      return;

    case '?':
      if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
        {
          if (TARGET_THUMB || current_insn_predicate != NULL)
            abort ();

          fputs (arm_condition_codes[arm_current_cc], stream);
        }
      else if (current_insn_predicate)
        {
          enum arm_cond_code code;

          if (TARGET_THUMB)
            abort ();

          code = get_arm_condition_code (current_insn_predicate);
          fputs (arm_condition_codes[code], stream);
        }
      return;

    case 'N':
      {
        REAL_VALUE_TYPE r;
        REAL_VALUE_FROM_CONST_DOUBLE (r, x);
        r = REAL_VALUE_NEGATE (r);
        fprintf (stream, "%s", fp_const_from_val (&r));
      }
      return;

    case 'B':
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT val;
          val = ARM_SIGN_EXTEND (~INTVAL (x));
          fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
        }
      else
        {
          putc ('~', stream);
          output_addr_const (stream, x);
        }
      return;

    case 'i':
      fprintf (stream, "%s", arithmetic_instr (x, 1));
      return;

    /* Truncate Cirrus shift counts.  */
    case 's':
      if (GET_CODE (x) == CONST_INT)
        {
          fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
          return;
        }
      arm_print_operand (stream, x, 0);
      return;

    case 'I':
      fprintf (stream, "%s", arithmetic_instr (x, 0));
      return;

    case 'S':
      {
        HOST_WIDE_INT val;
        const char * shift = shift_op (x, &val);

        if (shift)
          {
            fprintf (stream, ", %s ", shift_op (x, &val));
            if (val == -1)
              arm_print_operand (stream, XEXP (x, 1), 0);
            else
              fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
          }
      }
      return;

    /* An explanation of the 'Q', 'R' and 'H' register operands:

       In a pair of registers containing a DI or DF value the 'Q'
       operand returns the register number of the register containing
       the least significant part of the value.  The 'R' operand returns
       the register number of the register containing the most
       significant part of the value.

       The 'H' operand returns the higher of the two register numbers.
       On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
       same as the 'Q' operand, since the most significant part of the
       value is held in the lower number register.  The reverse is true
       on systems where WORDS_BIG_ENDIAN is false.

       The purpose of these operands is to distinguish between cases
       where the endian-ness of the values is important (for example
       when they are added together), and cases where the endian-ness
       is irrelevant, but the order of register operations is important.
       For example when loading a value from memory into a register
       pair, the endian-ness does not matter.  Provided that the value
       from the lower memory address is put into the lower numbered
       register, and the value from the higher address is put into the
       higher numbered register, the load will work regardless of whether
       the value being loaded is big-wordian or little-wordian.  The
       order of the two register loads can matter however, if the address
       of the memory location is actually held in one of the registers
       being overwritten by the load.  */
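    /* Illustrative note (added; not in the original source): for a
       DImode value held in {r0, r1} on a little-endian target, %Q
       prints r0, %R prints r1 and %H prints r1; with WORDS_BIG_ENDIAN
       %Q prints r1, %R prints r0 and %H still prints r1.  */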
    case 'Q':
      if (REGNO (x) > LAST_ARM_REGNUM)
        abort ();
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
      return;

    case 'R':
      if (REGNO (x) > LAST_ARM_REGNUM)
        abort ();
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
      return;

    case 'H':
      if (REGNO (x) > LAST_ARM_REGNUM)
        abort ();
      asm_fprintf (stream, "%r", REGNO (x) + 1);
      return;

    case 'm':
      asm_fprintf (stream, "%r",
                   GET_CODE (XEXP (x, 0)) == REG
                   ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
      return;

    case 'M':
      asm_fprintf (stream, "{%r-%r}",
                   REGNO (x),
                   REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
      return;

    case 'd':
      /* CONST_TRUE_RTX means always -- that's the default.  */
      if (x == const_true_rtx)
        return;

      fputs (arm_condition_codes[get_arm_condition_code (x)],
             stream);
      return;

    case 'D':
      /* CONST_TRUE_RTX means not always -- ie never.  We shouldn't ever
         want to do that.  */
      if (x == const_true_rtx)
        abort ();

      fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
                                 (get_arm_condition_code (x))],
             stream);
      return;

    /* Cirrus registers can be accessed in a variety of ways:
         single floating point (f)
         double floating point (d)
         32bit integer         (fx)
         64bit integer         (dx).  */
    case 'W':			/* Cirrus register in F mode.  */
    case 'X':			/* Cirrus register in D mode.  */
    case 'Y':			/* Cirrus register in FX mode.  */
    case 'Z':			/* Cirrus register in DX mode.  */
      if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
        abort ();

      fprintf (stream, "mv%s%s",
               code == 'W' ? "f"
               : code == 'X' ? "d"
               : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);

      return;

    /* Print cirrus register in the mode specified by the register's mode.  */
    case 'V':
      {
        int mode = GET_MODE (x);

        if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
          abort ();

        fprintf (stream, "mv%s%s",
                 mode == DFmode ? "d"
                 : mode == SImode ? "fx"
                 : mode == DImode ? "dx"
                 : "f", reg_names[REGNO (x)] + 2);

        return;
      }

    case 'U':
      if (GET_CODE (x) != REG
          || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
          || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
        /* Bad value for wCG register number.  */
        abort ();
      else
        fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
      return;

    /* Print an iWMMXt control register name.  */
    case 'w':
      if (GET_CODE (x) != CONST_INT
          || INTVAL (x) < 0
          || INTVAL (x) >= 16)
        /* Bad value for wC register number.  */
        abort ();
      else
        {
          static const char * wc_reg_names [16] =
            {
              "wCID",  "wCon",  "wCSSF", "wCASF",
              "wC4",   "wC5",   "wC6",   "wC7",
              "wCGR0", "wCGR1", "wCGR2", "wCGR3",
              "wC12",  "wC13",  "wC14",  "wC15"
            };

          fprintf (stream, wc_reg_names [INTVAL (x)]);
        }
      return;

    default:
      if (x == 0)
        abort ();

      if (GET_CODE (x) == REG)
        asm_fprintf (stream, "%r", REGNO (x));
      else if (GET_CODE (x) == MEM)
        {
          output_memory_reference_mode = GET_MODE (x);
          output_address (XEXP (x, 0));
        }
      else if (GET_CODE (x) == CONST_DOUBLE)
        fprintf (stream, "#%s", fp_immediate_constant (x));
      else if (GET_CODE (x) == NEG)
        abort ();		/* This should never happen now.  */
      else
        {
          fputc ('#', stream);
          output_addr_const (stream, x);
        }
    }
}
#ifndef AOF_ASSEMBLER
/* Target hook for assembling integer objects.  The ARM version needs to
   handle word-sized values specially.  */
static bool
arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD && aligned_p)
    {
      fputs ("\t.word\t", asm_out_file);
      output_addr_const (asm_out_file, x);

      /* Mark symbols as position independent.  We only do this in the
         .text segment, not in the .data segment.  */
      if (NEED_GOT_RELOC && flag_pic && making_const_table &&
          (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
        {
          if (GET_CODE (x) == SYMBOL_REF
              && (CONSTANT_POOL_ADDRESS_P (x)
                  || SYMBOL_REF_LOCAL_P (x)))
            fputs ("(GOTOFF)", asm_out_file);
          else if (GET_CODE (x) == LABEL_REF)
            fputs ("(GOTOFF)", asm_out_file);
          else
            fputs ("(GOT)", asm_out_file);
        }
      fputc ('\n', asm_out_file);
      return true;
    }

  if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
    {
      int i, units;

      if (GET_CODE (x) != CONST_VECTOR)
        abort ();

      units = CONST_VECTOR_NUNITS (x);

      switch (GET_MODE (x))
        {
        case V2SImode: size = 4; break;
        case V4HImode: size = 2; break;
        case V8QImode: size = 1; break;
        default:
          abort ();
        }

      for (i = 0; i < units; i++)
        {
          rtx elt;

          elt = CONST_VECTOR_ELT (x, i);
          assemble_integer
            (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
        }

      return true;
    }

  return default_assemble_integer (x, size, aligned_p);
}
#endif
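/* Example output (added; not in the original source): assembling the
   word-sized address of a local symbol "foo" into a PIC constant pool
   produces

	.word	foo(GOTOFF)

   whereas a non-local symbol is emitted with the (GOT) suffix.  */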
9928 /* A finite state machine takes care of noticing whether or not instructions
9929 can be conditionally executed, and thus decrease execution time and code
9930 size by deleting branch instructions. The fsm is controlled by
9931 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
9933 /* The state of the fsm controlling condition codes are:
9934 0: normal, do nothing special
9935 1: make ASM_OUTPUT_OPCODE not output this instruction
9936 2: make ASM_OUTPUT_OPCODE not output this instruction
9937 3: make instructions conditional
9938 4: make instructions conditional
9940 State transitions (state->state by whom under condition):
9941 0 -> 1 final_prescan_insn if the `target' is a label
9942 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
9943 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
9944 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
9945 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
9946 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
9947 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
9948 (the target insn is arm_target_insn).
9950 If the jump clobbers the conditions then we use states 2 and 4.
9952 A similar thing can be done with conditional return insns.
9954 XXX In case the `target' is an unconditional branch, this conditionalising
9955 of the instructions always reduces code size, but not always execution
9956 time. But then, I want to reduce the code size to somewhere near what
9957 /bin/cc produces. */
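/* As an illustration (not from the original comments), the fsm turns a
   branch-over sequence such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   into the single conditionally executed instruction

	cmp	r0, #0
	addne	r1, r1, #1

   removing the branch altogether.  */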
9959 /* Returns the index of the ARM condition code string in
9960 `arm_condition_codes'. COMPARISON should be an rtx like
9961 `(eq (...) (...))'. */
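/* For instance (an illustrative note): for a comparison rtx such as
       (eq (reg:CC_Z CC_REGNUM) (const_int 0))
   the mode is CC_Zmode, so EQ maps to ARM_EQ below, the index of the
   "eq" entry in arm_condition_codes.  */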
9962 static enum arm_cond_code
9963 get_arm_condition_code (rtx comparison)
9965 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
9966 int code;
9967 enum rtx_code comp_code = GET_CODE (comparison);
9969 if (GET_MODE_CLASS (mode) != MODE_CC)
9970 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
9971 XEXP (comparison, 1));
9973 switch (mode)
9975 case CC_DNEmode: code = ARM_NE; goto dominance;
9976 case CC_DEQmode: code = ARM_EQ; goto dominance;
9977 case CC_DGEmode: code = ARM_GE; goto dominance;
9978 case CC_DGTmode: code = ARM_GT; goto dominance;
9979 case CC_DLEmode: code = ARM_LE; goto dominance;
9980 case CC_DLTmode: code = ARM_LT; goto dominance;
9981 case CC_DGEUmode: code = ARM_CS; goto dominance;
9982 case CC_DGTUmode: code = ARM_HI; goto dominance;
9983 case CC_DLEUmode: code = ARM_LS; goto dominance;
9984 case CC_DLTUmode: code = ARM_CC;
9986 dominance:
9987 if (comp_code != EQ && comp_code != NE)
9988 abort ();
9990 if (comp_code == EQ)
9991 return ARM_INVERSE_CONDITION_CODE (code);
9992 return code;
9994 case CC_NOOVmode:
9995 switch (comp_code)
9997 case NE: return ARM_NE;
9998 case EQ: return ARM_EQ;
9999 case GE: return ARM_PL;
10000 case LT: return ARM_MI;
10001 default: abort ();
10004 case CC_Zmode:
10005 switch (comp_code)
10007 case NE: return ARM_NE;
10008 case EQ: return ARM_EQ;
10009 default: abort ();
10012 case CC_Nmode:
10013 switch (comp_code)
10015 case NE: return ARM_MI;
10016 case EQ: return ARM_PL;
10017 default: abort ();
10020 case CCFPEmode:
10021 case CCFPmode:
10022 /* These encodings assume that AC=1 in the FPA system control
10023 byte. This allows us to handle all cases except UNEQ and
10024 LTGT. */
10025 switch (comp_code)
10027 case GE: return ARM_GE;
10028 case GT: return ARM_GT;
10029 case LE: return ARM_LS;
10030 case LT: return ARM_MI;
10031 case NE: return ARM_NE;
10032 case EQ: return ARM_EQ;
10033 case ORDERED: return ARM_VC;
10034 case UNORDERED: return ARM_VS;
10035 case UNLT: return ARM_LT;
10036 case UNLE: return ARM_LE;
10037 case UNGT: return ARM_HI;
10038 case UNGE: return ARM_PL;
10039 /* UNEQ and LTGT do not have a representation. */
10040 case UNEQ: /* Fall through. */
10041 case LTGT: /* Fall through. */
10042 default: abort ();
10045 case CC_SWPmode:
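	  /* Explanatory note (not in the original): in CC_SWPmode the
	     operands of the comparison were swapped when the flags were
	     set, so each asymmetric condition maps to its mirror image
	     below, e.g. GT yields ARM_LT.  */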
10046 switch (comp_code)
10048 case NE: return ARM_NE;
10049 case EQ: return ARM_EQ;
10050 case GE: return ARM_LE;
10051 case GT: return ARM_LT;
10052 case LE: return ARM_GE;
10053 case LT: return ARM_GT;
10054 case GEU: return ARM_LS;
10055 case GTU: return ARM_CC;
10056 case LEU: return ARM_CS;
10057 case LTU: return ARM_HI;
10058 default: abort ();
10061 case CC_Cmode:
10062 switch (comp_code)
10064 case LTU: return ARM_CS;
10065 case GEU: return ARM_CC;
10066 default: abort ();
10069 case CCmode:
10070 switch (comp_code)
10072 case NE: return ARM_NE;
10073 case EQ: return ARM_EQ;
10074 case GE: return ARM_GE;
10075 case GT: return ARM_GT;
10076 case LE: return ARM_LE;
10077 case LT: return ARM_LT;
10078 case GEU: return ARM_CS;
10079 case GTU: return ARM_HI;
10080 case LEU: return ARM_LS;
10081 case LTU: return ARM_CC;
10082 default: abort ();
10085 default: abort ();
10088 abort ();
10091 void
10092 arm_final_prescan_insn (rtx insn)
10094 /* BODY will hold the body of INSN. */
10095 rtx body = PATTERN (insn);
10097 /* This will be 1 if trying to repeat the trick, and things need to be
10098 reversed if it appears to fail. */
10099 int reverse = 0;
10101 /* If JUMP_CLOBBERS is nonzero, then the conditions are clobbered when
10102 the branch is taken, even if the rtl suggests otherwise.  It also
10103 means that we have to grub around within the jump expression to find
10104 out what the conditions are when the jump isn't taken.  */
10105 int jump_clobbers = 0;
10107 /* If we start with a return insn, we only succeed if we find another one. */
10108 int seeking_return = 0;
10110 /* START_INSN will hold the insn from where we start looking. This is the
10111 first insn after the following code_label if REVERSE is true. */
10112 rtx start_insn = insn;
10114 /* If in state 4, check if the target branch is reached, in order to
10115 change back to state 0. */
10116 if (arm_ccfsm_state == 4)
10118 if (insn == arm_target_insn)
10120 arm_target_insn = NULL;
10121 arm_ccfsm_state = 0;
10123 return;
10126 /* If in state 3, it is possible to repeat the trick, if this insn is an
10127 unconditional branch to a label, and immediately following this branch
10128 is the previous target label which is only used once, and the label this
10129 branch jumps to is not too far off. */
10130 if (arm_ccfsm_state == 3)
10132 if (simplejump_p (insn))
10134 start_insn = next_nonnote_insn (start_insn);
10135 if (GET_CODE (start_insn) == BARRIER)
10137 /* XXX Isn't this always a barrier? */
10138 start_insn = next_nonnote_insn (start_insn);
10140 if (GET_CODE (start_insn) == CODE_LABEL
10141 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10142 && LABEL_NUSES (start_insn) == 1)
10143 reverse = TRUE;
10144 else
10145 return;
10147 else if (GET_CODE (body) == RETURN)
10149 start_insn = next_nonnote_insn (start_insn);
10150 if (GET_CODE (start_insn) == BARRIER)
10151 start_insn = next_nonnote_insn (start_insn);
10152 if (GET_CODE (start_insn) == CODE_LABEL
10153 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10154 && LABEL_NUSES (start_insn) == 1)
10156 reverse = TRUE;
10157 seeking_return = 1;
10159 else
10160 return;
10162 else
10163 return;
10166 if (arm_ccfsm_state != 0 && !reverse)
10167 abort ();
10168 if (GET_CODE (insn) != JUMP_INSN)
10169 return;
10171 /* This jump might be paralleled with a clobber of the condition codes;
10172 the jump should always come first.  */
10173 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10174 body = XVECEXP (body, 0, 0);
10176 if (reverse
10177 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10178 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10180 int insns_skipped;
10181 int fail = FALSE, succeed = FALSE;
10182 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10183 int then_not_else = TRUE;
10184 rtx this_insn = start_insn, label = 0;
10186 /* If the jump cannot be done with one instruction, we cannot
10187 conditionally execute the instruction in the inverse case. */
10188 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10190 jump_clobbers = 1;
10191 return;
10194 /* Register the insn jumped to. */
10195 if (reverse)
10197 if (!seeking_return)
10198 label = XEXP (SET_SRC (body), 0);
10200 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10201 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10202 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10204 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10205 then_not_else = FALSE;
10207 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10208 seeking_return = 1;
10209 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10211 seeking_return = 1;
10212 then_not_else = FALSE;
10214 else
10215 abort ();
10217 /* See how many insns this branch skips, and what kind of insns. If all
10218 insns are okay, and the label or unconditional branch to the same
10219 label is not too far away, succeed. */
10220 for (insns_skipped = 0;
10221 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10223 rtx scanbody;
10225 this_insn = next_nonnote_insn (this_insn);
10226 if (!this_insn)
10227 break;
10229 switch (GET_CODE (this_insn))
10231 case CODE_LABEL:
10232 /* Succeed if it is the target label, otherwise fail since
10233 control falls in from somewhere else. */
10234 if (this_insn == label)
10236 if (jump_clobbers)
10238 arm_ccfsm_state = 2;
10239 this_insn = next_nonnote_insn (this_insn);
10241 else
10242 arm_ccfsm_state = 1;
10243 succeed = TRUE;
10245 else
10246 fail = TRUE;
10247 break;
10249 case BARRIER:
10250 /* Succeed if the following insn is the target label.
10251 Otherwise fail.
10252 If return insns are used then the last insn in a function
10253 will be a barrier. */
10254 this_insn = next_nonnote_insn (this_insn);
10255 if (this_insn && this_insn == label)
10257 if (jump_clobbers)
10259 arm_ccfsm_state = 2;
10260 this_insn = next_nonnote_insn (this_insn);
10262 else
10263 arm_ccfsm_state = 1;
10264 succeed = TRUE;
10266 else
10267 fail = TRUE;
10268 break;
10270 case CALL_INSN:
10271 /* If using 32-bit addresses the condition codes are not preserved
10272 over calls. */
10273 if (TARGET_APCS_32)
10275 /* Succeed if the following insn is the target label,
10276 or if the following two insns are a barrier and
10277 the target label. */
10278 this_insn = next_nonnote_insn (this_insn);
10279 if (this_insn && GET_CODE (this_insn) == BARRIER)
10280 this_insn = next_nonnote_insn (this_insn);
10282 if (this_insn && this_insn == label
10283 && insns_skipped < max_insns_skipped)
10285 if (jump_clobbers)
10287 arm_ccfsm_state = 2;
10288 this_insn = next_nonnote_insn (this_insn);
10290 else
10291 arm_ccfsm_state = 1;
10292 succeed = TRUE;
10294 else
10295 fail = TRUE;
10297 break;
10299 case JUMP_INSN:
10300 /* If this is an unconditional branch to the same label, succeed.
10301 If it is to another label, do nothing. If it is conditional,
10302 fail. */
10303 /* XXX Probably, the tests for SET and the PC are
10304 unnecessary. */
10306 scanbody = PATTERN (this_insn);
10307 if (GET_CODE (scanbody) == SET
10308 && GET_CODE (SET_DEST (scanbody)) == PC)
10310 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
10311 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
10313 arm_ccfsm_state = 2;
10314 succeed = TRUE;
10316 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
10317 fail = TRUE;
10319 /* Fail if a conditional return is undesirable (eg on a
10320 StrongARM), but still allow this if optimizing for size. */
10321 else if (GET_CODE (scanbody) == RETURN
10322 && !use_return_insn (TRUE, NULL)
10323 && !optimize_size)
10324 fail = TRUE;
10325 else if (GET_CODE (scanbody) == RETURN
10326 && seeking_return)
10328 arm_ccfsm_state = 2;
10329 succeed = TRUE;
10331 else if (GET_CODE (scanbody) == PARALLEL)
10333 switch (get_attr_conds (this_insn))
10335 case CONDS_NOCOND:
10336 break;
10337 default:
10338 fail = TRUE;
10339 break;
10342 else
10343 fail = TRUE; /* Unrecognized jump (eg epilogue). */
10345 break;
10347 case INSN:
10348 /* Instructions using or affecting the condition codes make it
10349 fail. */
10350 scanbody = PATTERN (this_insn);
10351 if (!(GET_CODE (scanbody) == SET
10352 || GET_CODE (scanbody) == PARALLEL)
10353 || get_attr_conds (this_insn) != CONDS_NOCOND)
10354 fail = TRUE;
10356 /* A conditional Cirrus instruction must be followed by
10357 a non-Cirrus instruction.  However, this function is
10358 itself conditionalizing instructions, and by the time
10359 we get here we can no longer add instructions (nops)
10360 because shorten_branches() has already been called; so
10361 to be safe we disable the conditionalizing of Cirrus
10362 instructions altogether. */
10363 if (GET_CODE (scanbody) != USE
10364 && GET_CODE (scanbody) != CLOBBER
10365 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
10366 fail = TRUE;
10367 break;
10369 default:
10370 break;
10373 if (succeed)
10375 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
10376 arm_target_label = CODE_LABEL_NUMBER (label);
10377 else if (seeking_return || arm_ccfsm_state == 2)
10379 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
10381 this_insn = next_nonnote_insn (this_insn);
10382 if (this_insn && (GET_CODE (this_insn) == BARRIER
10383 || GET_CODE (this_insn) == CODE_LABEL))
10384 abort ();
10386 if (!this_insn)
10388 /* Oh dear!  We ran off the end; give up. */
10389 recog (PATTERN (insn), insn, NULL);
10390 arm_ccfsm_state = 0;
10391 arm_target_insn = NULL;
10392 return;
10394 arm_target_insn = this_insn;
10396 else
10397 abort ();
10398 if (jump_clobbers)
10400 if (reverse)
10401 abort ();
10402 arm_current_cc =
10403 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
10404 0), 0), 1));
10405 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
10406 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10407 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
10408 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10410 else
10412 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
10413 what it was. */
10414 if (!reverse)
10415 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
10416 0));
10419 if (reverse || then_not_else)
10420 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
10423 /* Restore recog_data (getting the attributes of other insns can
10424 destroy this array, but final.c assumes that it remains intact
10425 across this call; since the insn has been recognized already we
10426 call recog directly). */
10427 recog (PATTERN (insn), insn, NULL);
10431 /* Returns true if REGNO is a valid register
10432 for holding a quantity of type MODE. */
10433 int
10434 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
10436 if (GET_MODE_CLASS (mode) == MODE_CC)
10437 return regno == CC_REGNUM;
10439 if (TARGET_THUMB)
10440 /* For the Thumb we only allow values bigger than SImode in
10441 registers 0 - 6, so that there is always a second low
10442 register available to hold the upper part of the value.
10443 We probably ought to ensure that the register is the
10444 start of an even numbered register pair. */
10445 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
10447 if (IS_CIRRUS_REGNUM (regno))
10448 /* We have outlawed SI values in Cirrus registers because they
10449 reside in the lower 32 bits, but SF values reside in the
10450 upper 32 bits. This causes gcc all sorts of grief. We can't
10451 even split the registers into pairs because Cirrus SI values
10452 get sign extended to 64 bits -- aldyh. */
10453 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
10455 if (IS_IWMMXT_GR_REGNUM (regno))
10456 return mode == SImode;
10458 if (IS_IWMMXT_REGNUM (regno))
10459 return VALID_IWMMXT_REG_MODE (mode);
10461 if (regno <= LAST_ARM_REGNUM)
10462 /* We allow any value to be stored in the general registers. */
10463 return 1;
10465 if ( regno == FRAME_POINTER_REGNUM
10466 || regno == ARG_POINTER_REGNUM)
10467 /* We only allow integers in the fake hard registers. */
10468 return GET_MODE_CLASS (mode) == MODE_INT;
10470 /* The only registers left are the FPA registers
10471 which we only allow to hold FP values. */
10472 return GET_MODE_CLASS (mode) == MODE_FLOAT
10473 && regno >= FIRST_ARM_FP_REGNUM
10474 && regno <= LAST_ARM_FP_REGNUM;
10477 enum reg_class
10478 arm_regno_class (int regno)
10480 if (TARGET_THUMB)
10482 if (regno == STACK_POINTER_REGNUM)
10483 return STACK_REG;
10484 if (regno == CC_REGNUM)
10485 return CC_REG;
10486 if (regno < 8)
10487 return LO_REGS;
10488 return HI_REGS;
10491 if ( regno <= LAST_ARM_REGNUM
10492 || regno == FRAME_POINTER_REGNUM
10493 || regno == ARG_POINTER_REGNUM)
10494 return GENERAL_REGS;
10496 if (regno == CC_REGNUM)
10497 return NO_REGS;
10499 if (IS_CIRRUS_REGNUM (regno))
10500 return CIRRUS_REGS;
10502 if (IS_IWMMXT_REGNUM (regno))
10503 return IWMMXT_REGS;
10505 if (IS_IWMMXT_GR_REGNUM (regno))
10506 return IWMMXT_GR_REGS;
10508 return FPA_REGS;
10511 /* Handle a special case when computing the offset
10512 of an argument from the frame pointer. */
10513 int
10514 arm_debugger_arg_offset (int value, rtx addr)
10516 rtx insn;
10518 /* We are only interested if dbxout_parms() failed to compute the offset. */
10519 if (value != 0)
10520 return 0;
10522 /* We can only cope with the case where the address is held in a register. */
10523 if (GET_CODE (addr) != REG)
10524 return 0;
10526 /* If we are using the frame pointer to point at the argument, then
10527 an offset of 0 is correct. */
10528 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
10529 return 0;
10531 /* If we are using the stack pointer to point at the
10532 argument, then an offset of 0 is correct. */
10533 if ((TARGET_THUMB || !frame_pointer_needed)
10534 && REGNO (addr) == SP_REGNUM)
10535 return 0;
10537 /* Oh dear. The argument is pointed to by a register rather
10538 than being held in a register, or being stored at a known
10539 offset from the frame pointer. Since GDB only understands
10540 those two kinds of argument we must translate the address
10541 held in the register into an offset from the frame pointer.
10542 We do this by searching through the insns for the function
10543 looking to see where this register gets its value. If the
10544 register is initialized from the frame pointer plus an offset
10545 then we are in luck and we can continue, otherwise we give up.
10547 This code is exercised by producing debugging information
10548 for a function with arguments like this:
10550 double func (double a, double b, int c, double d) {return d;}
10552 Without this code the stab for parameter 'd' will be set to
10553 an offset of 0 from the frame pointer, rather than 8. */
10555 /* The if() statement says:
10557 If the insn is a normal instruction
10558 and if the insn is setting the value in a register
10559 and if the register being set is the register holding the address of the argument
10560 and if the address is computed by an addition
10561 that involves adding to a register
10562 which is the frame pointer
10563 a constant integer
10565 then... */
10567 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10569 if ( GET_CODE (insn) == INSN
10570 && GET_CODE (PATTERN (insn)) == SET
10571 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
10572 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
10573 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
10574 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
10575 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
10578 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
10580 break;
10584 if (value == 0)
10586 debug_rtx (addr);
10587 warning ("unable to compute real location of stacked parameter");
10588 value = 8; /* XXX magic hack */
10591 return value;
10594 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
10595 do \
10597 if ((MASK) & insn_flags) \
10598 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
10600 while (0)
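/* So, for example (an illustrative note), a later call such as
       def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
		     ARM_BUILTIN_WZERO);
   registers the builtin only when the selected target's insn_flags
   include FL_IWMMXT.  */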
10602 struct builtin_description
10604 const unsigned int mask;
10605 const enum insn_code icode;
10606 const char * const name;
10607 const enum arm_builtins code;
10608 const enum rtx_code comparison;
10609 const unsigned int flag;
10612 static const struct builtin_description bdesc_2arg[] =
10614 #define IWMMXT_BUILTIN(code, string, builtin) \
10615 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
10616 ARM_BUILTIN_##builtin, 0, 0 },
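/* Thus, for example, IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
   expands to the table entry
     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },
   tying the builtin name to its named insn pattern.  */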
10618 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
10619 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
10620 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
10621 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
10622 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
10623 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
10624 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
10625 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
10626 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
10627 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
10628 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
10629 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
10630 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
10631 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
10632 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
10633 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
10634 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
10635 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
10636 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
10637 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsh", WMULSH)
10638 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmuluh", WMULUH)
10639 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
10640 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
10641 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
10642 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
10643 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
10644 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
10645 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
10646 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
10647 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
10648 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
10649 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
10650 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
10651 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
10652 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
10653 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
10654 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
10655 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
10656 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
10657 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
10658 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
10659 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
10660 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
10661 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
10662 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
10663 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
10664 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
10665 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
10666 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
10667 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
10668 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
10669 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
10670 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
10671 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
10672 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
10673 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
10674 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
10675 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
10677 #define IWMMXT_BUILTIN2(code, builtin) \
10678 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
10680 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
10681 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
10682 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
10683 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
10684 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
10685 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
10686 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
10687 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
10688 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
10689 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
10690 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
10691 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
10692 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
10693 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
10694 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
10695 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
10696 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
10697 IWMMXT_BUILTIN2 (lshrdi3, WSRLDI)
10698 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
10699 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
10700 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
10701 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
10702 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
10703 IWMMXT_BUILTIN2 (ashrdi3, WSRADI)
10704 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
10705 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
10706 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
10707 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
10708 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
10709 IWMMXT_BUILTIN2 (rordi3, WRORDI)
10710 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
10711 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
10714 static const struct builtin_description bdesc_1arg[] =
10716 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
10717 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
10718 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
10719 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
10720 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
10721 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
10722 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
10723 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
10724 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
10725 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
10726 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
10727 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
10728 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
10729 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
10730 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
10731 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
10732 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
10733 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
10736 /* Set up all the iWMMXt builtins. This is
10737 not called if TARGET_IWMMXT is zero. */
10739 static void
10740 arm_init_iwmmxt_builtins (void)
10742 const struct builtin_description * d;
10743 size_t i;
10744 tree endlink = void_list_node;
10746 tree int_ftype_int
10747 = build_function_type (integer_type_node,
10748 tree_cons (NULL_TREE, integer_type_node, endlink));
10749 tree v8qi_ftype_v8qi_v8qi_int
10750 = build_function_type (V8QI_type_node,
10751 tree_cons (NULL_TREE, V8QI_type_node,
10752 tree_cons (NULL_TREE, V8QI_type_node,
10753 tree_cons (NULL_TREE,
10754 integer_type_node,
10755 endlink))));
10756 tree v4hi_ftype_v4hi_int
10757 = build_function_type (V4HI_type_node,
10758 tree_cons (NULL_TREE, V4HI_type_node,
10759 tree_cons (NULL_TREE, integer_type_node,
10760 endlink)));
10761 tree v2si_ftype_v2si_int
10762 = build_function_type (V2SI_type_node,
10763 tree_cons (NULL_TREE, V2SI_type_node,
10764 tree_cons (NULL_TREE, integer_type_node,
10765 endlink)));
10766 tree v2si_ftype_di_di
10767 = build_function_type (V2SI_type_node,
10768 tree_cons (NULL_TREE, long_long_integer_type_node,
10769 tree_cons (NULL_TREE, long_long_integer_type_node,
10770 endlink)));
10771 tree di_ftype_di_int
10772 = build_function_type (long_long_integer_type_node,
10773 tree_cons (NULL_TREE, long_long_integer_type_node,
10774 tree_cons (NULL_TREE, integer_type_node,
10775 endlink)));
10776 tree di_ftype_di_int_int
10777 = build_function_type (long_long_integer_type_node,
10778 tree_cons (NULL_TREE, long_long_integer_type_node,
10779 tree_cons (NULL_TREE, integer_type_node,
10780 tree_cons (NULL_TREE,
10781 integer_type_node,
10782 endlink))));
10783 tree int_ftype_v8qi
10784 = build_function_type (integer_type_node,
10785 tree_cons (NULL_TREE, V8QI_type_node,
10786 endlink));
10787 tree int_ftype_v4hi
10788 = build_function_type (integer_type_node,
10789 tree_cons (NULL_TREE, V4HI_type_node,
10790 endlink));
10791 tree int_ftype_v2si
10792 = build_function_type (integer_type_node,
10793 tree_cons (NULL_TREE, V2SI_type_node,
10794 endlink));
10795 tree int_ftype_v8qi_int
10796 = build_function_type (integer_type_node,
10797 tree_cons (NULL_TREE, V8QI_type_node,
10798 tree_cons (NULL_TREE, integer_type_node,
10799 endlink)));
10800 tree int_ftype_v4hi_int
10801 = build_function_type (integer_type_node,
10802 tree_cons (NULL_TREE, V4HI_type_node,
10803 tree_cons (NULL_TREE, integer_type_node,
10804 endlink)));
10805 tree int_ftype_v2si_int
10806 = build_function_type (integer_type_node,
10807 tree_cons (NULL_TREE, V2SI_type_node,
10808 tree_cons (NULL_TREE, integer_type_node,
10809 endlink)));
10810 tree v8qi_ftype_v8qi_int_int
10811 = build_function_type (V8QI_type_node,
10812 tree_cons (NULL_TREE, V8QI_type_node,
10813 tree_cons (NULL_TREE, integer_type_node,
10814 tree_cons (NULL_TREE,
10815 integer_type_node,
10816 endlink))));
10817 tree v4hi_ftype_v4hi_int_int
10818 = build_function_type (V4HI_type_node,
10819 tree_cons (NULL_TREE, V4HI_type_node,
10820 tree_cons (NULL_TREE, integer_type_node,
10821 tree_cons (NULL_TREE,
10822 integer_type_node,
10823 endlink))));
10824 tree v2si_ftype_v2si_int_int
10825 = build_function_type (V2SI_type_node,
10826 tree_cons (NULL_TREE, V2SI_type_node,
10827 tree_cons (NULL_TREE, integer_type_node,
10828 tree_cons (NULL_TREE,
10829 integer_type_node,
10830 endlink))));
10831 /* Miscellaneous. */
10832 tree v8qi_ftype_v4hi_v4hi
10833 = build_function_type (V8QI_type_node,
10834 tree_cons (NULL_TREE, V4HI_type_node,
10835 tree_cons (NULL_TREE, V4HI_type_node,
10836 endlink)));
10837 tree v4hi_ftype_v2si_v2si
10838 = build_function_type (V4HI_type_node,
10839 tree_cons (NULL_TREE, V2SI_type_node,
10840 tree_cons (NULL_TREE, V2SI_type_node,
10841 endlink)));
10842 tree v2si_ftype_v4hi_v4hi
10843 = build_function_type (V2SI_type_node,
10844 tree_cons (NULL_TREE, V4HI_type_node,
10845 tree_cons (NULL_TREE, V4HI_type_node,
10846 endlink)));
10847 tree v2si_ftype_v8qi_v8qi
10848 = build_function_type (V2SI_type_node,
10849 tree_cons (NULL_TREE, V8QI_type_node,
10850 tree_cons (NULL_TREE, V8QI_type_node,
10851 endlink)));
10852 tree v4hi_ftype_v4hi_di
10853 = build_function_type (V4HI_type_node,
10854 tree_cons (NULL_TREE, V4HI_type_node,
10855 tree_cons (NULL_TREE,
10856 long_long_integer_type_node,
10857 endlink)));
10858 tree v2si_ftype_v2si_di
10859 = build_function_type (V2SI_type_node,
10860 tree_cons (NULL_TREE, V2SI_type_node,
10861 tree_cons (NULL_TREE,
10862 long_long_integer_type_node,
10863 endlink)));
10864 tree void_ftype_int_int
10865 = build_function_type (void_type_node,
10866 tree_cons (NULL_TREE, integer_type_node,
10867 tree_cons (NULL_TREE, integer_type_node,
10868 endlink)));
10869 tree di_ftype_void
10870 = build_function_type (long_long_unsigned_type_node, endlink);
10871 tree di_ftype_v8qi
10872 = build_function_type (long_long_integer_type_node,
10873 tree_cons (NULL_TREE, V8QI_type_node,
10874 endlink));
10875 tree di_ftype_v4hi
10876 = build_function_type (long_long_integer_type_node,
10877 tree_cons (NULL_TREE, V4HI_type_node,
10878 endlink));
10879 tree di_ftype_v2si
10880 = build_function_type (long_long_integer_type_node,
10881 tree_cons (NULL_TREE, V2SI_type_node,
10882 endlink));
10883 tree v2si_ftype_v4hi
10884 = build_function_type (V2SI_type_node,
10885 tree_cons (NULL_TREE, V4HI_type_node,
10886 endlink));
10887 tree v4hi_ftype_v8qi
10888 = build_function_type (V4HI_type_node,
10889 tree_cons (NULL_TREE, V8QI_type_node,
10890 endlink));
10892 tree di_ftype_di_v4hi_v4hi
10893 = build_function_type (long_long_unsigned_type_node,
10894 tree_cons (NULL_TREE,
10895 long_long_unsigned_type_node,
10896 tree_cons (NULL_TREE, V4HI_type_node,
10897 tree_cons (NULL_TREE,
10898 V4HI_type_node,
10899 endlink))));
10901 tree di_ftype_v4hi_v4hi
10902 = build_function_type (long_long_unsigned_type_node,
10903 tree_cons (NULL_TREE, V4HI_type_node,
10904 tree_cons (NULL_TREE, V4HI_type_node,
10905 endlink)));
10907 /* Normal vector binops. */
10908 tree v8qi_ftype_v8qi_v8qi
10909 = build_function_type (V8QI_type_node,
10910 tree_cons (NULL_TREE, V8QI_type_node,
10911 tree_cons (NULL_TREE, V8QI_type_node,
10912 endlink)));
10913 tree v4hi_ftype_v4hi_v4hi
10914 = build_function_type (V4HI_type_node,
10915 tree_cons (NULL_TREE, V4HI_type_node,
10916 tree_cons (NULL_TREE, V4HI_type_node,
10917 endlink)));
10918 tree v2si_ftype_v2si_v2si
10919 = build_function_type (V2SI_type_node,
10920 tree_cons (NULL_TREE, V2SI_type_node,
10921 tree_cons (NULL_TREE, V2SI_type_node,
10922 endlink)));
10923 tree di_ftype_di_di
10924 = build_function_type (long_long_unsigned_type_node,
10925 tree_cons (NULL_TREE, long_long_unsigned_type_node,
10926 tree_cons (NULL_TREE,
10927 long_long_unsigned_type_node,
10928 endlink)));
10930 /* Add all builtins that are more or less simple operations on two
10931 operands. */
10932 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
10934 /* Use one of the operands; the target can have a different mode for
10935 mask-generating compares. */
10936 enum machine_mode mode;
10937 tree type;
10939 if (d->name == 0)
10940 continue;
10942 mode = insn_data[d->icode].operand[1].mode;
10944 switch (mode)
10946 case V8QImode:
10947 type = v8qi_ftype_v8qi_v8qi;
10948 break;
10949 case V4HImode:
10950 type = v4hi_ftype_v4hi_v4hi;
10951 break;
10952 case V2SImode:
10953 type = v2si_ftype_v2si_v2si;
10954 break;
10955 case DImode:
10956 type = di_ftype_di_di;
10957 break;
10959 default:
10960 abort ();
10963 def_mbuiltin (d->mask, d->name, type, d->code);
10966 /* Add the remaining MMX insns with somewhat more complicated types. */
10967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
10968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
10969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
10971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
10972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
10973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
10974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
10975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
10976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
10978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
10979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
10980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
10981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
10982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
10983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
10985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
10986 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
10987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
10988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
10989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
10990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
10992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
10993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
10994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
10995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
10996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
10997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
10999 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11001 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11002 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11003 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11004 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11006 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11007 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11008 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11009 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11010 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11011 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11012 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11013 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11014 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11016 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11017 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11018 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11020 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11021 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11022 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11024 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11025 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11026 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11027 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11028 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11029 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11031 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11032 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11033 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11034 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11035 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11036 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11037 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11038 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11039 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11040 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11041 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11042 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11044 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11045 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11046 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11047 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11049 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11050 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11051 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11052 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11053 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11054 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11055 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
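/* Illustrative usage sketch (not part of this file; the exact
   definitions live in the arm mmintrin.h header): the intrinsic
   wrappers for these builtins look roughly like

	typedef unsigned long long __m64;
	typedef char __v8qi __attribute__ ((vector_size (8)));

	static __inline __m64
	_mm_add_pi8 (__m64 __m1, __m64 __m2)
	{
	  return (__m64) __builtin_arm_waddb ((__v8qi) __m1, (__v8qi) __m2);
	}
*/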
11058 static void
11059 arm_init_builtins (void)
11061 if (TARGET_REALLY_IWMMXT)
11062 arm_init_iwmmxt_builtins ();
11065 /* Errors in the source file can cause expand_expr to return const0_rtx
11066 where we expect a vector. To avoid crashing, use one of the vector
11067 clear instructions. */
11069 static rtx
11070 safe_vector_operand (rtx x, enum machine_mode mode)
11072 if (x != const0_rtx)
11073 return x;
11074 x = gen_reg_rtx (mode);
11076 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11077 : gen_rtx_SUBREG (DImode, x, 0)));
11078 return x;
11081 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11083 static rtx
11084 arm_expand_binop_builtin (enum insn_code icode,
11085 tree arglist, rtx target)
11087 rtx pat;
11088 tree arg0 = TREE_VALUE (arglist);
11089 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11090 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11091 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11092 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11093 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11094 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11096 if (VECTOR_MODE_P (mode0))
11097 op0 = safe_vector_operand (op0, mode0);
11098 if (VECTOR_MODE_P (mode1))
11099 op1 = safe_vector_operand (op1, mode1);
11101 if (! target
11102 || GET_MODE (target) != tmode
11103 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11104 target = gen_reg_rtx (tmode);
11106 /* In case the insn wants input operands in modes different from
11107 the result, abort. */
11108 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11109 abort ();
11111 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11112 op0 = copy_to_mode_reg (mode0, op0);
11113 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11114 op1 = copy_to_mode_reg (mode1, op1);
11116 pat = GEN_FCN (icode) (target, op0, op1);
11117 if (! pat)
11118 return 0;
11119 emit_insn (pat);
11120 return target;
11123 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11125 static rtx
11126 arm_expand_unop_builtin (enum insn_code icode,
11127 tree arglist, rtx target, int do_load)
11129 rtx pat;
11130 tree arg0 = TREE_VALUE (arglist);
11131 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11132 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11133 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11135 if (! target
11136 || GET_MODE (target) != tmode
11137 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11138 target = gen_reg_rtx (tmode);
11139 if (do_load)
11140 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11141 else
11143 if (VECTOR_MODE_P (mode0))
11144 op0 = safe_vector_operand (op0, mode0);
11146 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11147 op0 = copy_to_mode_reg (mode0, op0);
11150 pat = GEN_FCN (icode) (target, op0);
11151 if (! pat)
11152 return 0;
11153 emit_insn (pat);
11154 return target;
11157 /* Expand an expression EXP that calls a built-in function,
11158 with result going to TARGET if that's convenient
11159 (and in mode MODE if that's convenient).
11160 SUBTARGET may be used as the target for computing one of EXP's operands.
11161 IGNORE is nonzero if the value is to be ignored. */
11163 static rtx
11164 arm_expand_builtin (tree exp,
11165 rtx target,
11166 rtx subtarget ATTRIBUTE_UNUSED,
11167 enum machine_mode mode ATTRIBUTE_UNUSED,
11168 int ignore ATTRIBUTE_UNUSED)
11170 const struct builtin_description * d;
11171 enum insn_code icode;
11172 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11173 tree arglist = TREE_OPERAND (exp, 1);
11174 tree arg0;
11175 tree arg1;
11176 tree arg2;
11177 rtx op0;
11178 rtx op1;
11179 rtx op2;
11180 rtx pat;
11181 int fcode = DECL_FUNCTION_CODE (fndecl);
11182 size_t i;
11183 enum machine_mode tmode;
11184 enum machine_mode mode0;
11185 enum machine_mode mode1;
11186 enum machine_mode mode2;
11188 switch (fcode)
11190 case ARM_BUILTIN_TEXTRMSB:
11191 case ARM_BUILTIN_TEXTRMUB:
11192 case ARM_BUILTIN_TEXTRMSH:
11193 case ARM_BUILTIN_TEXTRMUH:
11194 case ARM_BUILTIN_TEXTRMSW:
11195 case ARM_BUILTIN_TEXTRMUW:
11196 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11197 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11198 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11199 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11200 : CODE_FOR_iwmmxt_textrmw);
11202 arg0 = TREE_VALUE (arglist);
11203 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11204 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11205 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11206 tmode = insn_data[icode].operand[0].mode;
11207 mode0 = insn_data[icode].operand[1].mode;
11208 mode1 = insn_data[icode].operand[2].mode;
11210 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11211 op0 = copy_to_mode_reg (mode0, op0);
11212 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11214 /* @@@ better error message */
11215 error ("selector must be an immediate");
11216 return gen_reg_rtx (tmode);
11218 if (target == 0
11219 || GET_MODE (target) != tmode
11220 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11221 target = gen_reg_rtx (tmode);
11222 pat = GEN_FCN (icode) (target, op0, op1);
11223 if (! pat)
11224 return 0;
11225 emit_insn (pat);
11226 return target;
11228 case ARM_BUILTIN_TINSRB:
11229 case ARM_BUILTIN_TINSRH:
11230 case ARM_BUILTIN_TINSRW:
11231 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
11232 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
11233 : CODE_FOR_iwmmxt_tinsrw);
11234 arg0 = TREE_VALUE (arglist);
11235 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11236 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11237 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11238 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11239 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11240 tmode = insn_data[icode].operand[0].mode;
11241 mode0 = insn_data[icode].operand[1].mode;
11242 mode1 = insn_data[icode].operand[2].mode;
11243 mode2 = insn_data[icode].operand[3].mode;
11245 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11246 op0 = copy_to_mode_reg (mode0, op0);
11247 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11248 op1 = copy_to_mode_reg (mode1, op1);
11249 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11251 /* @@@ better error message */
11252 error ("selector must be an immediate");
11253 return const0_rtx;
11255 if (target == 0
11256 || GET_MODE (target) != tmode
11257 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11258 target = gen_reg_rtx (tmode);
11259 pat = GEN_FCN (icode) (target, op0, op1, op2);
11260 if (! pat)
11261 return 0;
11262 emit_insn (pat);
11263 return target;
11265 case ARM_BUILTIN_SETWCX:
11266 arg0 = TREE_VALUE (arglist);
11267 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11268 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11269 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11270 emit_insn (gen_iwmmxt_tmcr (op0, op1));
11271 return 0;
11273 case ARM_BUILTIN_GETWCX:
11274 arg0 = TREE_VALUE (arglist);
11275 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11276 target = gen_reg_rtx (SImode);
11277 emit_insn (gen_iwmmxt_tmrc (target, op0));
11278 return target;
11280 case ARM_BUILTIN_WSHUFH:
11281 icode = CODE_FOR_iwmmxt_wshufh;
11282 arg0 = TREE_VALUE (arglist);
11283 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11284 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11285 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11286 tmode = insn_data[icode].operand[0].mode;
11287 mode1 = insn_data[icode].operand[1].mode;
11288 mode2 = insn_data[icode].operand[2].mode;
11290 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
11291 op0 = copy_to_mode_reg (mode1, op0);
11292 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
11294 /* @@@ better error message */
11295 error ("mask must be an immediate");
11296 return const0_rtx;
11298 if (target == 0
11299 || GET_MODE (target) != tmode
11300 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11301 target = gen_reg_rtx (tmode);
11302 pat = GEN_FCN (icode) (target, op0, op1);
11303 if (! pat)
11304 return 0;
11305 emit_insn (pat);
11306 return target;
11308 case ARM_BUILTIN_WSADB:
11309 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
11310 case ARM_BUILTIN_WSADH:
11311 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
11312 case ARM_BUILTIN_WSADBZ:
11313 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
11314 case ARM_BUILTIN_WSADHZ:
11315 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
11317 /* Several three-argument builtins. */
11318 case ARM_BUILTIN_WMACS:
11319 case ARM_BUILTIN_WMACU:
11320 case ARM_BUILTIN_WALIGN:
11321 case ARM_BUILTIN_TMIA:
11322 case ARM_BUILTIN_TMIAPH:
11323 case ARM_BUILTIN_TMIATT:
11324 case ARM_BUILTIN_TMIATB:
11325 case ARM_BUILTIN_TMIABT:
11326 case ARM_BUILTIN_TMIABB:
11327 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
11328 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
11329 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
11330 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
11331 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
11332 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
11333 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
11334 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
11335 : CODE_FOR_iwmmxt_walign);
11336 arg0 = TREE_VALUE (arglist);
11337 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11338 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
11339 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11340 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11341 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
11342 tmode = insn_data[icode].operand[0].mode;
11343 mode0 = insn_data[icode].operand[1].mode;
11344 mode1 = insn_data[icode].operand[2].mode;
11345 mode2 = insn_data[icode].operand[3].mode;
11347 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11348 op0 = copy_to_mode_reg (mode0, op0);
11349 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11350 op1 = copy_to_mode_reg (mode1, op1);
11351 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
11352 op2 = copy_to_mode_reg (mode2, op2);
11353 if (target == 0
11354 || GET_MODE (target) != tmode
11355 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11356 target = gen_reg_rtx (tmode);
11357 pat = GEN_FCN (icode) (target, op0, op1, op2);
11358 if (! pat)
11359 return 0;
11360 emit_insn (pat);
11361 return target;
11363 case ARM_BUILTIN_WZERO:
11364 target = gen_reg_rtx (DImode);
11365 emit_insn (gen_iwmmxt_clrdi (target));
11366 return target;
11368 default:
11369 break;
11372 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11373 if (d->code == (const enum arm_builtins) fcode)
11374 return arm_expand_binop_builtin (d->icode, arglist, target);
11376 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
11377 if (d->code == (const enum arm_builtins) fcode)
11378 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
11380 /* @@@ Should really do something sensible here. */
11381 return NULL_RTX;
11384 /* Recursively search through all of the blocks in a function
11385 checking to see if any of the variables created in that
11386 function match the RTX called 'orig'. If they do then
11387 replace them with the RTX called 'new'. */
11388 static void
11389 replace_symbols_in_block (tree block, rtx orig, rtx new)
11391 for (; block; block = BLOCK_CHAIN (block))
11393 tree sym;
11395 if (!TREE_USED (block))
11396 continue;
11398 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
11400 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
11401 || DECL_IGNORED_P (sym)
11402 || TREE_CODE (sym) != VAR_DECL
11403 || DECL_EXTERNAL (sym)
11404 || !rtx_equal_p (DECL_RTL (sym), orig)
11406 continue;
11408 SET_DECL_RTL (sym, new);
11411 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
11415 /* Return the number (counting from 0) of
11416 the least significant set bit in MASK. */
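/* For example, number_of_first_bit_set (0x18) is 3, since bit 3 is
   the lowest bit set in binary 11000.  */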
11418 inline static int
11419 number_of_first_bit_set (int mask)
11421 int bit;
11423 for (bit = 0;
11424 (mask & (1 << bit)) == 0;
11425 ++bit)
11426 continue;
11428 return bit;
11431 /* Generate code to return from a thumb function.
11432 If 'reg_containing_return_addr' is -1, then the return address is
11433 actually on the stack, at the stack pointer. */
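/* Summary note (not in the original): in the simplest case this is
   just "bx lr", or "pop {pc}" when the return address was pushed and
   no interworking is required; the bulk of the code below handles the
   harder cases, where popped values must be shuffled into their final
   registers with "mov" instructions.  */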
11434 static void
11435 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
11437 unsigned regs_available_for_popping;
11438 unsigned regs_to_pop;
11439 int pops_needed;
11440 unsigned available;
11441 unsigned required;
11442 int mode;
11443 int size;
11444 int restore_a4 = FALSE;
11446 /* Compute the registers we need to pop. */
11447 regs_to_pop = 0;
11448 pops_needed = 0;
11450 /* There is an assumption here, that if eh_ofs is not NULL, the
11451 normal return address will have been pushed. */
11452 if (reg_containing_return_addr == -1 || eh_ofs)
11454 /* When we are generating a return for __builtin_eh_return,
11455 reg_containing_return_addr must specify the return regno. */
11456 if (eh_ofs && reg_containing_return_addr == -1)
11457 abort ();
11459 regs_to_pop |= 1 << LR_REGNUM;
11460 ++pops_needed;
11463 if (TARGET_BACKTRACE)
11465 /* Restore the (ARM) frame pointer and stack pointer. */
11466 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
11467 pops_needed += 2;
11470 /* If there is nothing to pop then just emit the BX instruction and
11471 return. */
11472 if (pops_needed == 0)
11474 if (eh_ofs)
11475 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11477 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11478 return;
11480 /* Otherwise if we are not supporting interworking and we have not created
11481 a backtrace structure and the function was not entered in ARM mode then
11482 just pop the return address straight into the PC. */
11483 else if (!TARGET_INTERWORK
11484 && !TARGET_BACKTRACE
11485 && !is_called_in_ARM_mode (current_function_decl))
11487 if (eh_ofs)
11489 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
11490 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11491 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11493 else
11494 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
11496 return;
11499 /* Find out how many of the (return) argument registers we can corrupt. */
11500 regs_available_for_popping = 0;
11502 /* If returning via __builtin_eh_return, the bottom three registers
11503 all contain information needed for the return. */
11504 if (eh_ofs)
11505 size = 12;
11506 else
11508 #ifdef RTX_CODE
11509 /* Deduce the registers used from the function's return value
11510 if we can.  This is more reliable than examining
11511 regs_ever_live[] because that will be set if the register is
11512 ever used in the function, not just if the register is used
11513 to hold a return value. */
11515 if (current_function_return_rtx != 0)
11516 mode = GET_MODE (current_function_return_rtx);
11517 else
11518 #endif
11519 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11521 size = GET_MODE_SIZE (mode);
11523 if (size == 0)
11525 /* In a void function we can use any argument register.
11526 In a function that returns a structure on the stack
11527 we can use the second and third argument registers. */
11528 if (mode == VOIDmode)
11529 regs_available_for_popping =
11530 (1 << ARG_REGISTER (1))
11531 | (1 << ARG_REGISTER (2))
11532 | (1 << ARG_REGISTER (3));
11533 else
11534 regs_available_for_popping =
11535 (1 << ARG_REGISTER (2))
11536 | (1 << ARG_REGISTER (3));
11538 else if (size <= 4)
11539 regs_available_for_popping =
11540 (1 << ARG_REGISTER (2))
11541 | (1 << ARG_REGISTER (3));
11542 else if (size <= 8)
11543 regs_available_for_popping =
11544 (1 << ARG_REGISTER (3));
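      /* For example (an illustrative note, assuming ARG_REGISTER (N)
	 maps to core register N-1 as in arm.h): a function returning a
	 32-bit int keeps its result in r0, leaving r1 and r2 available
	 here, while one returning a 64-bit value in r0-r1 leaves only
	 r2.  */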
11547 /* Match registers to be popped with registers into which we pop them. */
11548 for (available = regs_available_for_popping,
11549 required = regs_to_pop;
11550 required != 0 && available != 0;
11551 available &= ~(available & - available),
11552 required &= ~(required & - required))
11553 -- pops_needed;
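/* X & -X isolates the lowest set bit of X in two's complement, so
   each iteration of the loop above retires the lowest numbered
   register from each mask, pairing one pending pop with one free
   register.  E.g. available == 0x06 (r1, r2) and required == 0x4000
   (lr) leaves pops_needed at zero after a single iteration, with r2
   left over to be stripped from the mask just below.  */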
11555 /* If we have any popping registers left over, remove them. */
11556 if (available > 0)
11557 regs_available_for_popping &= ~available;
11559 /* Otherwise, if we need another popping register, we can use
11560 the fourth argument register. */
11561 else if (pops_needed)
11563 /* If we have not found any free argument registers and
11564 reg a4 contains the return address, we must move it. */
11565 if (regs_available_for_popping == 0
11566 && reg_containing_return_addr == LAST_ARG_REGNUM)
11568 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11569 reg_containing_return_addr = LR_REGNUM;
11571 else if (size > 12)
11573 /* Register a4 is being used to hold part of the return value,
11574 but we have dire need of a free, low register. */
11575 restore_a4 = TRUE;
11577 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
11580 if (reg_containing_return_addr != LAST_ARG_REGNUM)
11582 /* The fourth argument register is available. */
11583 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
11585 --pops_needed;
11589 /* Pop as many registers as we can. */
11590 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11591 regs_available_for_popping);
11593 /* Process the registers we popped. */
11594 if (reg_containing_return_addr == -1)
11596 /* The return address was popped into the lowest numbered register. */
11597 regs_to_pop &= ~(1 << LR_REGNUM);
11599 reg_containing_return_addr =
11600 number_of_first_bit_set (regs_available_for_popping);
11602 /* Remove this register from the mask of available registers, so that
11603 the return address will not be corrupted by further pops. */
11604 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
11607 /* If we popped other registers then handle them here. */
11608 if (regs_available_for_popping)
11610 int frame_pointer;
11612 /* Work out which register currently contains the frame pointer. */
11613 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
11615 /* Move it into the correct place. */
11616 asm_fprintf (f, "\tmov\t%r, %r\n",
11617 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
11619 /* (Temporarily) remove it from the mask of popped registers. */
11620 regs_available_for_popping &= ~(1 << frame_pointer);
11621 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
11623 if (regs_available_for_popping)
11625 int stack_pointer;
11627 /* We popped the stack pointer as well;
11628 find the register that contains it. */
11629 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
11631 /* Move it into the stack register. */
11632 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
11634 /* At this point we have popped all necessary registers, so
11635 do not worry about restoring regs_available_for_popping
11636 to its correct value:
11638 assert (pops_needed == 0)
11639 assert (regs_available_for_popping == (1 << frame_pointer))
11640 assert (regs_to_pop == (1 << STACK_POINTER)) */
11642 else
11644 /* Since we have just moved the popped value into the frame
11645 pointer, the popping register is available for reuse, and
11646 we know that we still have the stack pointer left to pop. */
11647 regs_available_for_popping |= (1 << frame_pointer);
11651 /* If we still have registers left on the stack, but we no longer have
11652 any registers into which we can pop them, then we must move the return
11653 address into the link register and make available the register that
11654 contained it. */
11655 if (regs_available_for_popping == 0 && pops_needed > 0)
11657 regs_available_for_popping |= 1 << reg_containing_return_addr;
11659 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
11660 reg_containing_return_addr);
11662 reg_containing_return_addr = LR_REGNUM;
11665 /* If we have registers left on the stack then pop some more.
11666 We know that at most we will want to pop FP and SP. */
11667 if (pops_needed > 0)
11669 int popped_into;
11670 int move_to;
11672 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11673 regs_available_for_popping);
11675 /* We have popped either FP or SP.
11676 Move whichever one it is into the correct register. */
11677 popped_into = number_of_first_bit_set (regs_available_for_popping);
11678 move_to = number_of_first_bit_set (regs_to_pop);
11680 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
11682 regs_to_pop &= ~(1 << move_to);
11684 --pops_needed;
11687 /* If we still have not popped everything then we must have only
11688 had one register available to us and we are now popping the SP. */
11689 if (pops_needed > 0)
11691 int popped_into;
11693 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
11694 regs_available_for_popping);
11696 popped_into = number_of_first_bit_set (regs_available_for_popping);
11698 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
11700 assert (regs_to_pop == (1 << STACK_POINTER))
11701 assert (pops_needed == 1)
11705 /* If necessary restore the a4 register. */
11706 if (restore_a4)
11708 if (reg_containing_return_addr != LR_REGNUM)
11710 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
11711 reg_containing_return_addr = LR_REGNUM;
11714 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
11717 if (eh_ofs)
11718 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
11720 /* Return to caller. */
11721 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
11724 /* Emit code to push or pop registers to or from the stack. F is the
11725 assembly file. MASK is the registers to push or pop. PUSH is
11726 non-zero if we should push, and zero if we should pop. For debugging
11727 output, if pushing, adjust CFA_OFFSET by the amount of space added
11728 to the stack. REAL_REGS should have the same number of bits set as
11729 MASK, and will be used instead (in the same order) to describe which
11730 registers were saved - this is used to mark the save slots when we
11731 push high registers after moving them to low registers. */
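/* For example, when the prologue has moved high registers r8 and r9
   into r6 and r7 and pushed them, MASK would be (1 << 6) | (1 << 7)
   while REAL_REGS would be (1 << 8) | (1 << 9): the instruction
   emitted is "push {r6, r7}", but the dwarf2 save slots are recorded
   against r8 and r9.  (Illustrative values only.)  */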
11732 static void
11733 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
11735 int regno;
11736 int lo_mask = mask & 0xFF;
11737 int pushed_words = 0;
11739 if (lo_mask == 0 && !push && (mask & (1 << 15)))
11741 /* Special case. Do not generate a POP PC statement here, do it in
11742 thumb_exit(). */
11743 thumb_exit (f, -1, NULL_RTX);
11744 return;
11747 fprintf (f, "\t%s\t{", push ? "push" : "pop");
11749 /* Look at the low registers first. */
11750 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
11752 if (lo_mask & 1)
11754 asm_fprintf (f, "%r", regno);
11756 if ((lo_mask & ~1) != 0)
11757 fprintf (f, ", ");
11759 pushed_words++;
11763 if (push && (mask & (1 << LR_REGNUM)))
11765 /* Catch pushing the LR. */
11766 if (mask & 0xFF)
11767 fprintf (f, ", ");
11769 asm_fprintf (f, "%r", LR_REGNUM);
11771 pushed_words++;
11773 else if (!push && (mask & (1 << PC_REGNUM)))
11775 /* Catch popping the PC. */
11776 if (TARGET_INTERWORK || TARGET_BACKTRACE)
11778 /* The PC is never popped directly; instead
11779 it is popped into r3 and then BX is used. */
11780 fprintf (f, "}\n");
11782 thumb_exit (f, -1, NULL_RTX);
11784 return;
11786 else
11788 if (mask & 0xFF)
11789 fprintf (f, ", ");
11791 asm_fprintf (f, "%r", PC_REGNUM);
11795 fprintf (f, "}\n");
11797 if (push && pushed_words && dwarf2out_do_frame ())
11799 char *l = dwarf2out_cfi_label ();
11800 int pushed_mask = real_regs;
11802 *cfa_offset += pushed_words * 4;
11803 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
11805 pushed_words = 0;
11806 pushed_mask = real_regs;
11807 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
11809 if (pushed_mask & 1)
11810 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
11815 void
11816 thumb_final_prescan_insn (rtx insn)
11818 if (flag_print_asm_name)
11819 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
11820 INSN_ADDRESSES (INSN_UID (insn)));
11824 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
11826 unsigned HOST_WIDE_INT mask = 0xff;
11827 int i;
11829 if (val == 0) /* XXX */
11830 return 0;
11832 for (i = 0; i < 25; i++)
11833 if ((val & (mask << i)) == val)
11834 return 1;
11836 return 0;
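/* For example, 0x00ff0000 (0xff << 16) and 0x000003fc (0xff << 2)
   are shiftable constants, while 0x00ff00ff is not: its set bits do
   not fit within any single 8-bit window.  */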
11839 /* Returns nonzero if the current function contains,
11840 or might contain, a far jump. */
11842 thumb_far_jump_used_p (int in_prologue)
11844 rtx insn;
11846 /* This test is only important for leaf functions. */
11847 /* assert (!leaf_function_p ()); */
11849 /* If we have already decided that far jumps may be used,
11850 do not bother checking again, and always return true even if
11851 it turns out that they are not being used. Once we have made
11852 the decision that far jumps are present (and that hence the link
11853 register will be pushed onto the stack) we cannot go back on it. */
11854 if (cfun->machine->far_jump_used)
11855 return 1;
11857 /* If this function is not being called from the prologue/epilogue
11858 generation code then it must be being called from the
11859 INITIAL_ELIMINATION_OFFSET macro. */
11860 if (!in_prologue)
11862 /* In this case we know that we are being asked about the elimination
11863 of the arg pointer register. If that register is not being used,
11864 then there are no arguments on the stack, and we do not have to
11865 worry that a far jump might force the prologue to push the link
11866 register, changing the stack offsets. In this case we can just
11867 return false, since the presence of far jumps in the function will
11868 not affect stack offsets.
11870 If the arg pointer is live (or if it was live, but has now been
11871 eliminated and so set to dead) then we do have to test to see if
11872 the function might contain a far jump. This test can lead to some
11873 false negatives, since before reload is completed, the length of
11874 branch instructions is not known, so gcc defaults to returning their
11875 longest length, which in turn sets the far jump attribute to true.
11877 A false negative will not result in bad code being generated, but it
11878 will result in a needless push and pop of the link register. We
11879 hope that this does not occur too often. */
11880 if (regs_ever_live [ARG_POINTER_REGNUM])
11881 cfun->machine->arg_pointer_live = 1;
11882 else if (!cfun->machine->arg_pointer_live)
11883 return 0;
11886 /* Check to see if the function contains a branch
11887 insn with the far jump attribute set. */
11888 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11890 if (GET_CODE (insn) == JUMP_INSN
11891 /* Ignore tablejump patterns. */
11892 && GET_CODE (PATTERN (insn)) != ADDR_VEC
11893 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
11894 && get_attr_far_jump (insn) == FAR_JUMP_YES
11897 /* Record the fact that we have decided that
11898 the function does use far jumps. */
11899 cfun->machine->far_jump_used = 1;
11900 return 1;
11904 return 0;
11907 /* Return nonzero if FUNC must be entered in ARM mode. */
11909 is_called_in_ARM_mode (tree func)
11911 if (TREE_CODE (func) != FUNCTION_DECL)
11912 abort ();
11914 /* Ignore the problem of functions whose address is taken. */
11915 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
11916 return TRUE;
11918 #ifdef ARM_PE
11919 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
11920 #else
11921 return FALSE;
11922 #endif
11925 /* The bits which aren't usefully expanded as rtl. */
11926 const char *
11927 thumb_unexpanded_epilogue (void)
11929 int regno;
11930 int live_regs_mask = 0;
11931 int high_regs_pushed = 0;
11932 int leaf_function = leaf_function_p ();
11933 int had_to_push_lr;
11934 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
11936 if (return_used_this_function)
11937 return "";
11939 if (IS_NAKED (arm_current_func_type ()))
11940 return "";
11942 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
11943 if (THUMB_REG_PUSHED_P (regno))
11944 live_regs_mask |= 1 << regno;
11946 for (regno = 8; regno < 13; regno++)
11947 if (THUMB_REG_PUSHED_P (regno))
11948 high_regs_pushed++;
11950 /* The prologue may have pushed some high registers to use as
11951 work registers, e.g. the testsuite file:
11952 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
11953 compiles to produce:
11954 push {r4, r5, r6, r7, lr}
11955 mov r7, r9
11956 mov r6, r8
11957 push {r6, r7}
11958 as part of the prologue. We have to undo that pushing here. */
11960 if (high_regs_pushed)
11962 int mask = live_regs_mask;
11963 int next_hi_reg;
11964 int size;
11965 int mode;
11967 #ifdef RTX_CODE
11968 /* If possible, deduce the registers used from the function's return value.
11969 This is more reliable than examining regs_ever_live[] because that
11970 will be set if the register is ever used in the function, not just if
11971 the register is used to hold a return value. */
11973 if (current_function_return_rtx != 0)
11974 mode = GET_MODE (current_function_return_rtx);
11975 else
11976 #endif
11977 mode = DECL_MODE (DECL_RESULT (current_function_decl));
11979 size = GET_MODE_SIZE (mode);
11981 /* Unless we are returning a type of size > 12, register r3 is
11982 available. */
11983 if (size < 13)
11984 mask |= 1 << 3;
11986 if (mask == 0)
11987 /* Oh dear! We have no low registers into which we can pop
11988 high registers! */
11989 internal_error
11990 ("no low registers available for popping high registers");
11992 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
11993 if (THUMB_REG_PUSHED_P (next_hi_reg))
11994 break;
11996 while (high_regs_pushed)
11998 /* Find lo register(s) into which the high register(s) can
11999 be popped. */
12000 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12002 if (mask & (1 << regno))
12003 high_regs_pushed--;
12004 if (high_regs_pushed == 0)
12005 break;
12008 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12010 /* Pop the values into the low register(s). */
12011 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12013 /* Move the value(s) into the high registers. */
12014 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12016 if (mask & (1 << regno))
12018 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12019 regno);
12021 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12022 if (THUMB_REG_PUSHED_P (next_hi_reg))
12023 break;
12029 had_to_push_lr = (live_regs_mask || !leaf_function
12030 || thumb_far_jump_used_p (1));
12032 if (TARGET_BACKTRACE
12033 && ((live_regs_mask & 0xFF) == 0)
12034 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12036 /* The stack backtrace structure creation code had to
12037 push R7 in order to get a work register, so we pop
12038 it now. */
12039 live_regs_mask |= (1 << LAST_LO_REGNUM);
12042 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12044 if (had_to_push_lr
12045 && !is_called_in_ARM_mode (current_function_decl)
12046 && !eh_ofs)
12047 live_regs_mask |= 1 << PC_REGNUM;
12049 /* Either no argument registers were pushed or a backtrace
12050 structure was created which includes an adjusted stack
12051 pointer, so just pop everything. */
12052 if (live_regs_mask)
12053 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12054 live_regs_mask);
12056 if (eh_ofs)
12057 thumb_exit (asm_out_file, 2, eh_ofs);
12058 /* We have either just popped the return address into the
12059 PC, or it was kept in LR for the entire function, or
12060 it is still on the stack because we do not want to
12061 return by doing a pop {pc}. */
12062 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12063 thumb_exit (asm_out_file,
12064 (had_to_push_lr
12065 && is_called_in_ARM_mode (current_function_decl)) ?
12066 -1 : LR_REGNUM, NULL_RTX);
12068 else
12070 /* Pop everything but the return address. */
12071 live_regs_mask &= ~(1 << PC_REGNUM);
12073 if (live_regs_mask)
12074 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12075 live_regs_mask);
12077 if (had_to_push_lr)
12078 /* Get the return address into a temporary register. */
12079 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12080 1 << LAST_ARG_REGNUM);
12082 /* Remove the argument registers that were pushed onto the stack. */
12083 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12084 SP_REGNUM, SP_REGNUM,
12085 current_function_pretend_args_size);
12087 if (eh_ofs)
12088 thumb_exit (asm_out_file, 2, eh_ofs);
12089 else
12090 thumb_exit (asm_out_file,
12091 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12094 return "";
12097 /* Functions to save and restore machine-specific function data. */
12098 static struct machine_function *
12099 arm_init_machine_status (void)
12101 struct machine_function *machine;
12102 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12104 #if ARM_FT_UNKNOWN != 0
12105 machine->func_type = ARM_FT_UNKNOWN;
12106 #endif
12107 return machine;
12110 /* Return an RTX indicating where the return address to the
12111 calling function can be found. */
12113 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12115 if (count != 0)
12116 return NULL_RTX;
12118 if (TARGET_APCS_32)
12119 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12120 else
12122 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12123 GEN_INT (RETURN_ADDR_MASK26));
12124 return get_func_hard_reg_initial_val (cfun, lr);
12128 /* Do anything needed before RTL is emitted for each function. */
12129 void
12130 arm_init_expanders (void)
12132 /* Arrange to initialize and mark the machine per-function status. */
12133 init_machine_status = arm_init_machine_status;
12136 HOST_WIDE_INT
12137 thumb_get_frame_size (void)
12139 int regno;
12141 int base_size = ROUND_UP_WORD (get_frame_size ());
12142 int count_regs = 0;
12143 int entry_size = 0;
12144 int leaf;
12146 if (! TARGET_THUMB)
12147 abort ();
12149 if (! TARGET_ATPCS)
12150 return base_size;
12152 /* We need to know if we are a leaf function. Unfortunately, it
12153 is possible to be called after start_sequence has been called,
12154 which causes get_insns to return the insns for the sequence,
12155 not the function, which will cause leaf_function_p to return
12156 an incorrect result.
12158 To work around this, we cache the computed frame size. This
12159 works because we will only be calling RTL expanders that need
12160 to know about leaf functions once reload has completed, and the
12161 frame size cannot be changed after that time, so we can safely
12162 use the cached value. */
12164 if (reload_completed)
12165 return cfun->machine->frame_size;
12167 leaf = leaf_function_p ();
12169 /* A leaf function does not need any stack alignment if it has nothing
12170 on the stack. */
12171 if (leaf && base_size == 0)
12173 cfun->machine->frame_size = 0;
12174 return 0;
12177 /* We know that SP will be word aligned on entry, and we must
12178 preserve that condition at any subroutine call. But those are
12179 the only constraints. */
12181 /* Space for variadic functions. */
12182 if (current_function_pretend_args_size)
12183 entry_size += current_function_pretend_args_size;
12185 /* Space for pushed lo registers. */
12186 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12187 if (THUMB_REG_PUSHED_P (regno))
12188 count_regs++;
12190 /* Space for backtrace structure. */
12191 if (TARGET_BACKTRACE)
12193 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12194 entry_size += 20;
12195 else
12196 entry_size += 16;
12199 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12200 count_regs++; /* LR */
12202 entry_size += count_regs * 4;
12203 count_regs = 0;
12205 /* Space for pushed hi regs. */
12206 for (regno = 8; regno < 13; regno++)
12207 if (THUMB_REG_PUSHED_P (regno))
12208 count_regs++;
12210 entry_size += count_regs * 4;
12212 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12213 base_size += 4;
12214 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12215 abort ();
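/* Worked example: pushing four low registers plus lr makes
   entry_size 20; with base_size 8 and no outgoing arguments the
   total is 28, so base_size is padded to 12 and the total becomes
   32.  Every component is a multiple of 4, so one word of padding
   always restores 8-byte alignment, which is why a second failed
   test can only mean an internal error.  */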
12217 cfun->machine->frame_size = base_size;
12219 return base_size;
12222 /* Generate the rest of a function's prologue. */
12223 void
12224 thumb_expand_prologue (void)
12226 rtx insn, dwarf;
12228 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12229 + current_function_outgoing_args_size);
12230 unsigned long func_type;
12232 func_type = arm_current_func_type ();
12234 /* Naked functions don't have prologues. */
12235 if (IS_NAKED (func_type))
12236 return;
12238 if (IS_INTERRUPT (func_type))
12240 error ("interrupt service routines cannot be coded in Thumb mode");
12241 return;
12244 if (frame_pointer_needed)
12246 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
12247 RTX_FRAME_RELATED_P (insn) = 1;
12250 if (amount)
12252 amount = ROUND_UP_WORD (amount);
12254 if (amount < 512)
12256 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12257 GEN_INT (- amount)));
12258 RTX_FRAME_RELATED_P (insn) = 1;
12260 else
12262 int regno;
12263 rtx reg;
12265 /* The stack decrement is too big for an immediate value in a single
12266 insn. In theory we could issue multiple subtracts, but after
12267 three of them it becomes more space efficient to place the full
12268 value in the constant pool and load into a register. (Also the
12269 ARM debugger really likes to see only one stack decrement per
12270 function). So instead we look for a scratch register into which
12271 we can load the decrement, and then we subtract this from the
12272 stack pointer. Unfortunately on the thumb the only available
12273 scratch registers are the argument registers, and we cannot use
12274 these as they may hold arguments to the function. Instead we
12275 attempt to locate a call preserved register which is used by this
12276 function. If we can find one, then we know that it will have
12277 been pushed at the start of the prologue and so we can corrupt
12278 it now. */
12279 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
12280 if (THUMB_REG_PUSHED_P (regno)
12281 && !(frame_pointer_needed
12282 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
12283 break;
12285 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
12287 rtx spare = gen_rtx (REG, SImode, IP_REGNUM);
12289 /* Choose an arbitrary, non-argument low register. */
12290 reg = gen_rtx (REG, SImode, LAST_LO_REGNUM);
12292 /* Save it by copying it into a high, scratch register. */
12293 emit_insn (gen_movsi (spare, reg));
12294 /* Add a USE to stop propagate_one_insn() from barfing. */
12295 emit_insn (gen_prologue_use (spare));
12297 /* Decrement the stack. */
12298 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12299 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12300 stack_pointer_rtx, reg));
12301 RTX_FRAME_RELATED_P (insn) = 1;
12302 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12303 plus_constant (stack_pointer_rtx,
12304 GEN_INT (- amount)));
12305 RTX_FRAME_RELATED_P (dwarf) = 1;
12306 REG_NOTES (insn)
12307 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12308 REG_NOTES (insn));
12310 /* Restore the low register's original value. */
12311 emit_insn (gen_movsi (reg, spare));
12313 /* Emit a USE of the restored scratch register, so that flow
12314 analysis will not consider the restore redundant. The
12315 register won't be used again in this function and isn't
12316 restored by the epilogue. */
12317 emit_insn (gen_prologue_use (reg));
12319 else
12321 reg = gen_rtx (REG, SImode, regno);
12323 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
12325 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
12326 stack_pointer_rtx, reg));
12327 RTX_FRAME_RELATED_P (insn) = 1;
12328 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
12329 plus_constant (stack_pointer_rtx,
12330 GEN_INT (- amount)));
12331 RTX_FRAME_RELATED_P (dwarf) = 1;
12332 REG_NOTES (insn)
12333 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
12334 REG_NOTES (insn));
12339 if (current_function_profile || TARGET_NO_SCHED_PRO)
12340 emit_insn (gen_blockage ());
12343 void
12344 thumb_expand_epilogue (void)
12346 HOST_WIDE_INT amount = (thumb_get_frame_size ()
12347 + current_function_outgoing_args_size);
12348 int regno;
12350 /* Naked functions don't have epilogues. */
12351 if (IS_NAKED (arm_current_func_type ()))
12352 return;
12354 if (frame_pointer_needed)
12355 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
12356 else if (amount)
12358 amount = ROUND_UP_WORD (amount);
12360 if (amount < 512)
12361 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
12362 GEN_INT (amount)));
12363 else
12365 /* r3 is always free in the epilogue. */
12366 rtx reg = gen_rtx (REG, SImode, LAST_ARG_REGNUM);
12368 emit_insn (gen_movsi (reg, GEN_INT (amount)));
12369 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
12373 /* Emit a USE (stack_pointer_rtx), so that
12374 the stack adjustment will not be deleted. */
12375 emit_insn (gen_prologue_use (stack_pointer_rtx));
12377 if (current_function_profile || TARGET_NO_SCHED_PRO)
12378 emit_insn (gen_blockage ());
12380 /* Emit a clobber for each insn that will be restored in the epilogue,
12381 so that flow2 will get register lifetimes correct. */
12382 for (regno = 0; regno < 13; regno++)
12383 if (regs_ever_live[regno] && !call_used_regs[regno])
12384 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
12386 if (! regs_ever_live[LR_REGNUM])
12387 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
12390 static void
12391 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
12393 int live_regs_mask = 0;
12394 int high_regs_pushed = 0;
12395 int cfa_offset = 0;
12396 int regno;
12398 if (IS_NAKED (arm_current_func_type ()))
12399 return;
12401 if (is_called_in_ARM_mode (current_function_decl))
12403 const char * name;
12405 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
12406 abort ();
12407 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
12408 abort ();
12409 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
12411 /* Generate code sequence to switch us into Thumb mode. */
12412 /* The .code 32 directive has already been emitted by
12413 ASM_DECLARE_FUNCTION_NAME. */
12414 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
12415 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
12417 /* Generate a label, so that the debugger will notice the
12418 change in instruction sets. This label is also used by
12419 the assembler to bypass the ARM code when this function
12420 is called from a Thumb encoded function elsewhere in the
12421 same file. Hence the definition of STUB_NAME here must
12422 agree with the definition in gas/config/tc-arm.c. */
12424 #define STUB_NAME ".real_start_of"
12426 fprintf (f, "\t.code\t16\n");
12427 #ifdef ARM_PE
12428 if (arm_dllexport_name_p (name))
12429 name = arm_strip_name_encoding (name);
12430 #endif
12431 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
12432 fprintf (f, "\t.thumb_func\n");
12433 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
12436 if (current_function_pretend_args_size)
12438 if (cfun->machine->uses_anonymous_args)
12440 int num_pushes;
12442 fprintf (f, "\tpush\t{");
12444 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
12446 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
12447 regno <= LAST_ARG_REGNUM;
12448 regno++)
12449 asm_fprintf (f, "%r%s", regno,
12450 regno == LAST_ARG_REGNUM ? "" : ", ");
12452 fprintf (f, "}\n");
12454 else
12455 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
12456 SP_REGNUM, SP_REGNUM,
12457 current_function_pretend_args_size);
12459 /* We don't need to record the stores for unwinding (would it
12460 help the debugger any if we did?), but record the change in
12461 the stack pointer. */
12462 if (dwarf2out_do_frame ())
12464 char *l = dwarf2out_cfi_label ();
12465 cfa_offset = cfa_offset + current_function_pretend_args_size;
12466 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12470 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12471 if (THUMB_REG_PUSHED_P (regno))
12472 live_regs_mask |= 1 << regno;
12474 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
12475 live_regs_mask |= 1 << LR_REGNUM;
12477 if (TARGET_BACKTRACE)
12479 int offset;
12480 int work_register = 0;
12481 int wr;
12483 /* We have been asked to create a stack backtrace structure.
12484 The code looks like this:
12486 0 .align 2
12487 0 func:
12488 0 sub SP, #16 Reserve space for 4 registers.
12489 2 push {R7} Get a work register.
12490 4 add R7, SP, #20 Get the stack pointer before the push.
12491 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
12492 8 mov R7, PC Get hold of the start of this code plus 12.
12493 10 str R7, [SP, #16] Store it.
12494 12 mov R7, FP Get hold of the current frame pointer.
12495 14 str R7, [SP, #4] Store it.
12496 16 mov R7, LR Get hold of the current return address.
12497 18 str R7, [SP, #12] Store it.
12498 20 add R7, SP, #16 Point at the start of the backtrace structure.
12499 22 mov FP, R7 Put this value into the frame pointer. */
12501 if ((live_regs_mask & 0xFF) == 0)
12503 /* See if the a4 register is free. */
12505 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
12506 work_register = LAST_ARG_REGNUM;
12507 else /* We must push a register of our own. */
12508 live_regs_mask |= (1 << LAST_LO_REGNUM);
12511 if (work_register == 0)
12513 /* Select a register from the list that will be pushed to
12514 use as our work register. */
12515 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
12516 if ((1 << work_register) & live_regs_mask)
12517 break;
12520 asm_fprintf
12521 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
12522 SP_REGNUM, SP_REGNUM);
12524 if (dwarf2out_do_frame ())
12526 char *l = dwarf2out_cfi_label ();
12527 cfa_offset = cfa_offset + 16;
12528 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
12531 if (live_regs_mask)
12532 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12534 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
12535 if (wr & live_regs_mask)
12536 offset += 4;
12538 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12539 offset + 16 + current_function_pretend_args_size);
12541 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12542 offset + 4);
12544 /* Make sure that the instruction fetching the PC is in the right place
12545 to calculate "start of backtrace creation code + 12". */
12546 if (live_regs_mask)
12548 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12549 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12550 offset + 12);
12551 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12552 ARM_HARD_FRAME_POINTER_REGNUM);
12553 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12554 offset);
12556 else
12558 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
12559 ARM_HARD_FRAME_POINTER_REGNUM);
12560 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12561 offset);
12562 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
12563 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12564 offset + 12);
12567 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
12568 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
12569 offset + 8);
12570 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
12571 offset + 12);
12572 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
12573 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
12575 else if (live_regs_mask)
12576 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
12578 for (regno = 8; regno < 13; regno++)
12579 if (THUMB_REG_PUSHED_P (regno))
12580 high_regs_pushed++;
12582 if (high_regs_pushed)
12584 int pushable_regs = 0;
12585 int mask = live_regs_mask & 0xff;
12586 int next_hi_reg;
12588 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
12589 if (THUMB_REG_PUSHED_P (next_hi_reg))
12590 break;
12592 pushable_regs = mask;
12594 if (pushable_regs == 0)
12596 /* Desperation time -- this probably will never happen. */
12597 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
12598 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12599 mask = 1 << LAST_ARG_REGNUM;
12602 while (high_regs_pushed > 0)
12604 int real_regs_mask = 0;
12606 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
12608 if (mask & (1 << regno))
12610 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
12612 high_regs_pushed--;
12613 real_regs_mask |= (1 << next_hi_reg);
12615 if (high_regs_pushed)
12617 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
12618 next_hi_reg--)
12619 if (THUMB_REG_PUSHED_P (next_hi_reg))
12620 break;
12622 else
12624 mask &= ~((1 << regno) - 1);
12625 break;
12630 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
12633 if (pushable_regs == 0
12634 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
12635 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12639 /* Handle the case of a double word load into a low register from
12640 a computed memory address. The computed address may involve a
12641 register which is overwritten by the load. */
12642 const char *
12643 thumb_load_double_from_address (rtx *operands)
12645 rtx addr;
12646 rtx base;
12647 rtx offset;
12648 rtx arg1;
12649 rtx arg2;
12651 if (GET_CODE (operands[0]) != REG)
12652 abort ();
12654 if (GET_CODE (operands[1]) != MEM)
12655 abort ();
12657 /* Get the memory address. */
12658 addr = XEXP (operands[1], 0);
12660 /* Work out how the memory address is computed. */
12661 switch (GET_CODE (addr))
12663 case REG:
12664 operands[2] = gen_rtx (MEM, SImode,
12665 plus_constant (XEXP (operands[1], 0), 4));
12667 if (REGNO (operands[0]) == REGNO (addr))
12669 output_asm_insn ("ldr\t%H0, %2", operands);
12670 output_asm_insn ("ldr\t%0, %1", operands);
12672 else
12674 output_asm_insn ("ldr\t%0, %1", operands);
12675 output_asm_insn ("ldr\t%H0, %2", operands);
12677 break;
12679 case CONST:
12680 /* Compute <address> + 4 for the high order load. */
12681 operands[2] = gen_rtx (MEM, SImode,
12682 plus_constant (XEXP (operands[1], 0), 4));
12684 output_asm_insn ("ldr\t%0, %1", operands);
12685 output_asm_insn ("ldr\t%H0, %2", operands);
12686 break;
12688 case PLUS:
12689 arg1 = XEXP (addr, 0);
12690 arg2 = XEXP (addr, 1);
12692 if (CONSTANT_P (arg1))
12693 base = arg2, offset = arg1;
12694 else
12695 base = arg1, offset = arg2;
12697 if (GET_CODE (base) != REG)
12698 abort ();
12700 /* Catch the case of <address> = <reg> + <reg> */
12701 if (GET_CODE (offset) == REG)
12703 int reg_offset = REGNO (offset);
12704 int reg_base = REGNO (base);
12705 int reg_dest = REGNO (operands[0]);
12707 /* Add the base and offset registers together into the
12708 higher destination register. */
12709 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
12710 reg_dest + 1, reg_base, reg_offset);
12712 /* Load the lower destination register from the address in
12713 the higher destination register. */
12714 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
12715 reg_dest, reg_dest + 1);
12717 /* Load the higher destination register from its own address
12718 plus 4. */
12719 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
12720 reg_dest + 1, reg_dest + 1);
12722 else
12724 /* Compute <address> + 4 for the high order load. */
12725 operands[2] = gen_rtx (MEM, SImode,
12726 plus_constant (XEXP (operands[1], 0), 4));
12728 /* If the computed address is held in the low order register
12729 then load the high order register first, otherwise always
12730 load the low order register first. */
12731 if (REGNO (operands[0]) == REGNO (base))
12733 output_asm_insn ("ldr\t%H0, %2", operands);
12734 output_asm_insn ("ldr\t%0, %1", operands);
12736 else
12738 output_asm_insn ("ldr\t%0, %1", operands);
12739 output_asm_insn ("ldr\t%H0, %2", operands);
12742 break;
12744 case LABEL_REF:
12745 /* With no registers to worry about we can just load the value
12746 directly. */
12747 operands[2] = gen_rtx (MEM, SImode,
12748 plus_constant (XEXP (operands[1], 0), 4));
12750 output_asm_insn ("ldr\t%H0, %2", operands);
12751 output_asm_insn ("ldr\t%0, %1", operands);
12752 break;
12754 default:
12755 abort ();
12756 break;
12759 return "";
12762 const char *
12763 thumb_output_move_mem_multiple (int n, rtx *operands)
12765 rtx tmp;
12767 switch (n)
12769 case 2:
12770 if (REGNO (operands[4]) > REGNO (operands[5]))
12772 tmp = operands[4];
12773 operands[4] = operands[5];
12774 operands[5] = tmp;
12776 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
12777 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
12778 break;
12780 case 3:
12781 if (REGNO (operands[4]) > REGNO (operands[5]))
12783 tmp = operands[4];
12784 operands[4] = operands[5];
12785 operands[5] = tmp;
12787 if (REGNO (operands[5]) > REGNO (operands[6]))
12789 tmp = operands[5];
12790 operands[5] = operands[6];
12791 operands[6] = tmp;
12793 if (REGNO (operands[4]) > REGNO (operands[5]))
12795 tmp = operands[4];
12796 operands[4] = operands[5];
12797 operands[5] = tmp;
12800 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
12801 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
12802 break;
12804 default:
12805 abort ();
12808 return "";
12811 /* Routines for generating rtl. */
12812 void
12813 thumb_expand_movstrqi (rtx *operands)
12815 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
12816 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
12817 HOST_WIDE_INT len = INTVAL (operands[2]);
12818 HOST_WIDE_INT offset = 0;
12820 while (len >= 12)
12822 emit_insn (gen_movmem12b (out, in, out, in));
12823 len -= 12;
12826 if (len >= 8)
12828 emit_insn (gen_movmem8b (out, in, out, in));
12829 len -= 8;
12832 if (len >= 4)
12834 rtx reg = gen_reg_rtx (SImode);
12835 emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
12836 emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
12837 len -= 4;
12838 offset += 4;
12841 if (len >= 2)
12843 rtx reg = gen_reg_rtx (HImode);
12844 emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
12845 plus_constant (in, offset))));
12846 emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
12847 reg));
12848 len -= 2;
12849 offset += 2;
12852 if (len)
12854 rtx reg = gen_reg_rtx (QImode);
12855 emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
12856 plus_constant (in, offset))));
12857 emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
12858 reg));
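/* For example, a 15-byte copy emits one movmem12b for the first 12
   bytes (advancing both pointers), then moves the remaining 3 bytes
   as a halfword at offset 0 and a byte at offset 2 from the updated
   pointers.  */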
12863 thumb_cmp_operand (rtx op, enum machine_mode mode)
12865 return ((GET_CODE (op) == CONST_INT
12866 && INTVAL (op) < 256
12867 && INTVAL (op) >= 0)
12868 || s_register_operand (op, mode));
12872 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
12874 return (GET_CODE (op) == CONST_INT
12875 && INTVAL (op) < 0
12876 && INTVAL (op) > -256);
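/* Between them these predicates cover the constants a Thumb
   comparison can handle: "cmp rN, #imm" accepts 0..255 directly,
   while a negative constant in -255..-1 cannot be encoded, so the
   cbranch patterns can instead add the corresponding positive
   constant into a scratch register and branch on the flags of that
   addition.  */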
12879 /* Return TRUE if a result can be stored in OP without clobbering the
12880 condition code register. Prior to reload we only accept a
12881 register. After reload we have to be able to handle memory as
12882 well, since a pseudo may not get a hard reg and reload cannot
12883 handle output-reloads on jump insns.
12885 We could possibly handle mem before reload as well, but that might
12886 complicate things with the need to handle increment
12887 side-effects. */
12890 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
12892 return (s_register_operand (op, mode)
12893 || ((reload_in_progress || reload_completed)
12894 && memory_operand (op, mode)));
12897 /* Handle storing a half-word to memory during reload. */
12898 void
12899 thumb_reload_out_hi (rtx *operands)
12901 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
12904 /* Handle reading a half-word from memory during reload. */
12905 void
12906 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
12908 abort ();
12911 /* Return the length of a function name prefix
12912 that starts with the character 'c'. */
12913 static int
12914 arm_get_strip_length (int c)
12916 switch (c)
12918 ARM_NAME_ENCODING_LENGTHS
12919 default: return 0;
12923 /* Return a pointer to a function's name with any
12924 and all prefix encodings stripped from it. */
12925 const char *
12926 arm_strip_name_encoding (const char *name)
12928 int skip;
12930 while ((skip = arm_get_strip_length (* name)))
12931 name += skip;
12933 return name;
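/* For example, with a typical ARM encoding table (one in which '*'
   and the long/short call flag characters have a strip length of 1,
   a plausible ARM_NAME_ENCODING_LENGTHS setting), "*foo" comes back
   as "foo".  */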
12936 /* If there is a '*' anywhere in the name's prefix, then
12937 emit the stripped name verbatim, otherwise prepend an
12938 underscore if leading underscores are being used. */
12939 void
12940 arm_asm_output_labelref (FILE *stream, const char *name)
12942 int skip;
12943 int verbatim = 0;
12945 while ((skip = arm_get_strip_length (* name)))
12947 verbatim |= (*name == '*');
12948 name += skip;
12951 if (verbatim)
12952 fputs (name, stream);
12953 else
12954 asm_fprintf (stream, "%U%s", name);
12957 rtx aof_pic_label;
12959 #ifdef AOF_ASSEMBLER
12960 /* Special functions only needed when producing AOF syntax assembler. */
12962 struct pic_chain
12964 struct pic_chain * next;
12965 const char * symname;
12968 static struct pic_chain * aof_pic_chain = NULL;
12971 aof_pic_entry (rtx x)
12973 struct pic_chain ** chainp;
12974 int offset;
12976 if (aof_pic_label == NULL_RTX)
12978 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
12981 for (offset = 0, chainp = &aof_pic_chain; *chainp;
12982 offset += 4, chainp = &(*chainp)->next)
12983 if ((*chainp)->symname == XSTR (x, 0))
12984 return plus_constant (aof_pic_label, offset);
12986 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
12987 (*chainp)->next = NULL;
12988 (*chainp)->symname = XSTR (x, 0);
12989 return plus_constant (aof_pic_label, offset);
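/* Slots are handed out in call order: the first symbol gets
   x$adcons + 0, the next distinct symbol x$adcons + 4, and a
   repeated symbol returns its previously assigned offset.  */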
12992 void
12993 aof_dump_pic_table (FILE *f)
12995 struct pic_chain * chain;
12997 if (aof_pic_chain == NULL)
12998 return;
13000 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
13001 PIC_OFFSET_TABLE_REGNUM,
13002 PIC_OFFSET_TABLE_REGNUM);
13003 fputs ("|x$adcons|\n", f);
13005 for (chain = aof_pic_chain; chain; chain = chain->next)
13007 fputs ("\tDCD\t", f);
13008 assemble_name (f, chain->symname);
13009 fputs ("\n", f);
13013 int arm_text_section_count = 1;
13015 char *
13016 aof_text_section (void )
13018 static char buf[100];
13019 sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
13020 arm_text_section_count++);
13021 if (flag_pic)
13022 strcat (buf, ", PIC, REENTRANT");
13023 return buf;
13026 static int arm_data_section_count = 1;
13028 char *
13029 aof_data_section (void)
13031 static char buf[100];
13032 sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
13033 return buf;
13036 /* The AOF assembler is religiously strict about declarations of
13037 imported and exported symbols, so that it is impossible to declare
13038 a function as imported near the beginning of the file, and then to
13039 export it later on. It is, however, possible to delay the decision
13040 until all the functions in the file have been compiled. To get
13041 around this, we maintain a list of the imports and exports, and
13042 delete from it any that are subsequently defined. At the end of
13043 compilation we spit the remainder of the list out before the END
13044 directive. */
13046 struct import
13048 struct import * next;
13049 const char * name;
13052 static struct import * imports_list = NULL;
13054 void
13055 aof_add_import (const char *name)
13057 struct import * new;
13059 for (new = imports_list; new; new = new->next)
13060 if (new->name == name)
13061 return;
13063 new = (struct import *) xmalloc (sizeof (struct import));
13064 new->next = imports_list;
13065 imports_list = new;
13066 new->name = name;
13069 void
13070 aof_delete_import (const char *name)
13072 struct import ** old;
13074 for (old = &imports_list; *old; old = & (*old)->next)
13076 if ((*old)->name == name)
13078 *old = (*old)->next;
13079 return;
13084 int arm_main_function = 0;
13086 static void
13087 aof_dump_imports (FILE *f)
13089 /* The AOF assembler needs this to cause the startup code to be extracted
13090 from the library. Bringing in __main causes the whole thing to work
13091 automagically. */
13092 if (arm_main_function)
13094 text_section ();
13095 fputs ("\tIMPORT __main\n", f);
13096 fputs ("\tDCD __main\n", f);
13099 /* Now dump the remaining imports. */
13100 while (imports_list)
13102 fprintf (f, "\tIMPORT\t");
13103 assemble_name (f, imports_list->name);
13104 fputc ('\n', f);
13105 imports_list = imports_list->next;
13109 static void
13110 aof_globalize_label (FILE *stream, const char *name)
13112 default_globalize_label (stream, name);
13113 if (! strcmp (name, "main"))
13114 arm_main_function = 1;
13117 static void
13118 aof_file_start (void)
13120 fputs ("__r0\tRN\t0\n", asm_out_file);
13121 fputs ("__a1\tRN\t0\n", asm_out_file);
13122 fputs ("__a2\tRN\t1\n", asm_out_file);
13123 fputs ("__a3\tRN\t2\n", asm_out_file);
13124 fputs ("__a4\tRN\t3\n", asm_out_file);
13125 fputs ("__v1\tRN\t4\n", asm_out_file);
13126 fputs ("__v2\tRN\t5\n", asm_out_file);
13127 fputs ("__v3\tRN\t6\n", asm_out_file);
13128 fputs ("__v4\tRN\t7\n", asm_out_file);
13129 fputs ("__v5\tRN\t8\n", asm_out_file);
13130 fputs ("__v6\tRN\t9\n", asm_out_file);
13131 fputs ("__sl\tRN\t10\n", asm_out_file);
13132 fputs ("__fp\tRN\t11\n", asm_out_file);
13133 fputs ("__ip\tRN\t12\n", asm_out_file);
13134 fputs ("__sp\tRN\t13\n", asm_out_file);
13135 fputs ("__lr\tRN\t14\n", asm_out_file);
13136 fputs ("__pc\tRN\t15\n", asm_out_file);
13137 fputs ("__f0\tFN\t0\n", asm_out_file);
13138 fputs ("__f1\tFN\t1\n", asm_out_file);
13139 fputs ("__f2\tFN\t2\n", asm_out_file);
13140 fputs ("__f3\tFN\t3\n", asm_out_file);
13141 fputs ("__f4\tFN\t4\n", asm_out_file);
13142 fputs ("__f5\tFN\t5\n", asm_out_file);
13143 fputs ("__f6\tFN\t6\n", asm_out_file);
13144 fputs ("__f7\tFN\t7\n", asm_out_file);
13145 text_section ();
13148 static void
13149 aof_file_end (void)
13151 if (flag_pic)
13152 aof_dump_pic_table (asm_out_file);
13153 aof_dump_imports (asm_out_file);
13154 fputs ("\tEND\n", asm_out_file);
13156 #endif /* AOF_ASSEMBLER */
13158 #ifdef OBJECT_FORMAT_ELF
13159 /* Switch to an arbitrary section NAME with attributes as specified
13160 by FLAGS. ALIGN specifies any known alignment requirements for
13161 the section; 0 if the default should be used.
13163 Differs from the default elf version only in the prefix character
13164 used before the section type. */
13166 static void
13167 arm_elf_asm_named_section (const char *name, unsigned int flags)
13169 char flagchars[10], *f = flagchars;
13171 if (! named_section_first_declaration (name))
13173 fprintf (asm_out_file, "\t.section\t%s\n", name);
13174 return;
13177 if (!(flags & SECTION_DEBUG))
13178 *f++ = 'a';
13179 if (flags & SECTION_WRITE)
13180 *f++ = 'w';
13181 if (flags & SECTION_CODE)
13182 *f++ = 'x';
13183 if (flags & SECTION_SMALL)
13184 *f++ = 's';
13185 if (flags & SECTION_MERGE)
13186 *f++ = 'M';
13187 if (flags & SECTION_STRINGS)
13188 *f++ = 'S';
13189 if (flags & SECTION_TLS)
13190 *f++ = 'T';
13191 *f = '\0';
13193 fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);
13195 if (!(flags & SECTION_NOTYPE))
13197 const char *type;
13199 if (flags & SECTION_BSS)
13200 type = "nobits";
13201 else
13202 type = "progbits";
13204 fprintf (asm_out_file, ",%%%s", type);
13206 if (flags & SECTION_ENTSIZE)
13207 fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
13210 putc ('\n', asm_out_file);
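/* For instance, a writable progbits section (hypothetical name) is
   emitted as
	.section .data.foo,"aw",%progbits
   where the generic ELF hook would emit "@progbits"; '%' is used
   because '@' starts a comment in ARM assembler syntax.  */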
13212 #endif
13214 #ifndef ARM_PE
13215 /* Symbols in the text segment can be accessed without indirecting via the
13216 constant pool; it may take an extra binary operation, but this is still
13217 faster than indirecting via memory. Don't do this when not optimizing,
13218 since we won't be calculating all of the offsets necessary to do this
13219 simplification. */
13221 static void
13222 arm_encode_section_info (tree decl, rtx rtl, int first)
13224 /* This doesn't work with AOF syntax, since the string table may be in
13225 a different AREA. */
13226 #ifndef AOF_ASSEMBLER
13227 if (optimize > 0 && TREE_CONSTANT (decl)
13228 && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
13229 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
13230 #endif
13232 /* If we are referencing a function that is weak then encode a long call
13233 flag in the function name; otherwise, if the function is static or
13234 known to be defined in this file, then encode a short call flag. */
13235 if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
13237 if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
13238 arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
13239 else if (! TREE_PUBLIC (decl))
13240 arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
13243 #endif /* !ARM_PE */
13245 static void
13246 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
13248 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
13249 && !strcmp (prefix, "L"))
13251 arm_ccfsm_state = 0;
13252 arm_target_insn = NULL;
13254 default_internal_label (stream, prefix, labelno);
13257 /* Output code to add DELTA to the first argument, and then jump
13258 to FUNCTION. Used for C++ multiple inheritance. */
13259 static void
13260 arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13261 HOST_WIDE_INT delta,
13262 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
13263 tree function)
13265 int mi_delta = delta;
13266 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
13267 int shift = 0;
13268 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
13269 ? 1 : 0);
13270 if (mi_delta < 0)
13271 mi_delta = - mi_delta;
13272 while (mi_delta != 0)
13274 if ((mi_delta & (3 << shift)) == 0)
13275 shift += 2;
13276 else
13278 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
13279 mi_op, this_regno, this_regno,
13280 mi_delta & (0xff << shift));
13281 mi_delta &= ~(0xff << shift);
13282 shift += 8;
13285 fputs ("\tb\t", file);
13286 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
13287 if (NEED_PLT_RELOC)
13288 fputs ("(PLT)", file);
13289 fputc ('\n', file);
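/* The loop above emits DELTA in 8-bit chunks aligned to even bit
   positions, the shape an ARM add/sub immediate can encode.  For
   example, delta == 0x1234 produces
	add	r0, r0, #564	@ 0x234
	add	r0, r0, #4096	@ 0x1000
   (using r1 instead of r0 when the function returns an aggregate and
   so takes a hidden first argument).  */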
13293 arm_emit_vector_const (FILE *file, rtx x)
13295 int i;
13296 const char * pattern;
13298 if (GET_CODE (x) != CONST_VECTOR)
13299 abort ();
13301 switch (GET_MODE (x))
13303 case V2SImode: pattern = "%08x"; break;
13304 case V4HImode: pattern = "%04x"; break;
13305 case V8QImode: pattern = "%02x"; break;
13306 default: abort ();
13309 fprintf (file, "0x");
13310 for (i = CONST_VECTOR_NUNITS (x); i--;)
13312 rtx element;
13314 element = CONST_VECTOR_ELT (x, i);
13315 fprintf (file, pattern, INTVAL (element));
13318 return 1;
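/* Elements are printed from the highest numbered downwards, so a
   V4HImode vector holding 1, 2, 3, 4 in elements 0..3 is emitted as
   0x0004000300020001.  */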
13321 const char *
13322 arm_output_load_gr (rtx *operands)
13324 rtx reg;
13325 rtx offset;
13326 rtx wcgr;
13327 rtx sum;
13329 if (GET_CODE (operands [1]) != MEM
13330 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
13331 || GET_CODE (reg = XEXP (sum, 0)) != REG
13332 || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
13333 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
13334 return "wldrw%?\t%0, %1";
13336 /* Fix up an out-of-range load of a GR register. */
13337 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
13338 wcgr = operands[0];
13339 operands[0] = reg;
13340 output_asm_insn ("ldr%?\t%0, %1", operands);
13342 operands[0] = wcgr;
13343 operands[1] = reg;
13344 output_asm_insn ("tmcr%?\t%0, %1", operands);
13345 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
13347 return "";
13350 static rtx
13351 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
13352 int incoming ATTRIBUTE_UNUSED)
13354 #if 0
13355 /* FIXME: The ARM backend has special code to handle structure
13356 returns, and will reserve its own hidden first argument. So
13357 if this macro is enabled a *second* hidden argument will be
13358 reserved, which will break binary compatibility with old
13359 toolchains and also thunk handling. One day this should be
13360 fixed. */
13361 return 0;
13362 #else
13363 /* Register in which address to store a structure value
13364 is passed to a function. */
13365 return gen_rtx_REG (Pmode, ARG_REGISTER (1));
13366 #endif
13369 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
13371 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
13372 named arg and all anonymous args onto the stack.
13373 XXX I know the prologue shouldn't be pushing registers, but it is faster
13374 that way. */
13376 static void
13377 arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
13378 enum machine_mode mode ATTRIBUTE_UNUSED,
13379 tree type ATTRIBUTE_UNUSED,
13380 int *pretend_size,
13381 int second_time ATTRIBUTE_UNUSED)
13383 cfun->machine->uses_anonymous_args = 1;
13384 if (cum->nregs < NUM_ARG_REGS)
13385 *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
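/* For example, if the fixed arguments consume one register
   (cum->nregs == 1), *pretend_size becomes 12 and the prologue
   pushes r1, r2 and r3 so that the anonymous arguments are
   contiguous with any arguments already on the stack.  */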