/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int, rtx);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int arm_use_dfa_pipeline_interface (void);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse the -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int    arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output; this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, FLAGS, COSTS) \
  {#NAME, arm_none, FLAGS, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, 0, NULL}
};
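/* For illustration: each ARM_CORE line in arm-cores.def, for instance
   an (assumed) entry such as ARM_CORE(arm710, FL_MODE26 | FL_MODE32, slowmul),
   expands under the macro above into an initializer of the form

     {"arm710", arm_none, FL_MODE26 | FL_MODE32, arm_slowmul_rtx_costs},

   so the table stays in one place (arm-cores.def) and is stamped out
   here with the cost function name glued on.  */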
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  { "armv2",   arm2,       FL_CO_PROC | FL_MODE26 , NULL},
  { "armv2a",  arm2,       FL_CO_PROC | FL_MODE26 , NULL},
  { "armv3",   arm6,       FL_CO_PROC | FL_MODE26 | FL_MODE32 , NULL},
  { "armv3m",  arm7m,      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M , NULL},
  { "armv4",   arm7tdmi,   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M | FL_ARCH4 , NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",  arm7tdmi,   FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB , NULL},
  { "armv5",   arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
  { "armv5t",  arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
  { "armv5te", arm1026ejs, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E , NULL},
  { "armv6",   arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
  { "armv6j",  arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
  { "ep9312",  ep9312,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS , NULL},
  { "iwmmxt",  iwmmxt,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT , NULL},
  { NULL, arm_none, 0 , NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
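/* For example (an assumed invocation, for illustration only): with
   "-march=armv5t -mtune=arm10tdmi" the option machinery fills in
   arm_select[1].string = "armv5t" and arm_select[2].string = "arm10tdmi",
   and arm_override_options below looks each string up in the
   corresponding `processors' table to derive insn_flags and arm_tune.  */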
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
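/* Worked example for the loop above: value = 0x29 (binary 101001) takes
   three iterations -- 0x29 & 0x28 = 0x28, 0x28 & 0x27 = 0x20,
   0x20 & 0x1f = 0 -- so bit_count returns 3.  Each AND with (value - 1)
   clears exactly the lowest set bit.  */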
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      static const struct cpu_default
      {
        const int cpu;
        const char *const name;
      }
      cpu_defaults[] =
      {
        { TARGET_CPU_arm2,      "arm2" },
        { TARGET_CPU_arm6,      "arm6" },
        { TARGET_CPU_arm610,    "arm610" },
        { TARGET_CPU_arm710,    "arm710" },
        { TARGET_CPU_arm7m,     "arm7m" },
        { TARGET_CPU_arm7500fe, "arm7500fe" },
        { TARGET_CPU_arm7tdmi,  "arm7tdmi" },
        { TARGET_CPU_arm8,      "arm8" },
        { TARGET_CPU_arm810,    "arm810" },
        { TARGET_CPU_arm9,      "arm9" },
        { TARGET_CPU_strongarm, "strongarm" },
        { TARGET_CPU_xscale,    "xscale" },
        { TARGET_CPU_ep9312,    "ep9312" },
        { TARGET_CPU_iwmmxt,    "iwmmxt" },
        { TARGET_CPU_arm926ejs, "arm926ejs" },
        { TARGET_CPU_arm1026ejs, "arm1026ejs" },
        { TARGET_CPU_arm1136js, "arm1136js" },
        { TARGET_CPU_arm1136jfs, "arm1136jfs" },
        { TARGET_CPU_generic,   "arm" },
        { 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
        abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
        if (streq (def->name, sel->name))
          break;

      if (sel->name == NULL)
        abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* Force apcs-32 to be used for interworking.  */
          target_flags |= ARM_FLAG_APCS_32;

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }
      else if (!TARGET_APCS_32)
        sought |= FL_MODE26;
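      /* For instance (an assumed configuration, for illustration only):
         with a default CPU of arm6, whose flags lack FL_THUMB, compiling
         with -mthumb makes sought nonzero (FL_THUMB | FL_MODE32) while
         (sought & insn_flags) != sought, so the search below must pick
         a different, Thumb-capable core.  */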
      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 Unfortunately this does not work with multilibing.  We
                 need to be able to support multilibs for -mapcs-26 and for
                 -mthumb-interwork and there is no CPU that can support both
                 options.  Instead if we cannot find a cpu that has both the
                 characteristics of the default cpu and the given command line
                 options we scan the array again looking for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
         user, so issue a warning message.  If the user has specified
         "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
        warning ("target CPU does not support APCS-32" );
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking" );
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
        warning ("interworking forces APCS-32 to be used" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      if ((insn_flags & FL_VFPV2) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else if (insn_flags & FL_CIRRUS)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use the soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
    sorry ("-mfloat-abi=softfp");
  /* If soft-float is specified then don't use the FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
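/* Usage sketch (not part of this file): the table above services
   declarations such as

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   where the string argument is matched against the `arg' fields by
   arm_isr_value below, using the case-sensitive streq, which is why
   both spellings of each name are listed.  */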
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
        a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
        type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
        type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
        (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
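/* Background for the loop above: an ARM data-processing immediate is an
   8-bit value rotated right by an even amount, so for example 0xFF,
   0xFF00 and 0xF000000F are representable, while 0x101 (bits too far
   apart) and 0x1FE (an odd rotation) are not.  The loop simply tries
   each of the sixteen even rotations of an 8-bit window against I.  */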
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode,
                                                        source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
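  /* Illustration of the four counters just computed (an assumed value,
     for exposition only): remainder = 0x0000fff0 gives
     clear_sign_bit_copies = 16 (leading zero bits),
     set_sign_bit_copies = 0, clear_zero_bit_copies = 4 (trailing zero
     bits), and set_zero_bit_copies = 0.  */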
  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
         to be negative.  This is a good way of doing it, since the shift
         may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
        {
          if (const_ok_for_arm
              (temp1 = ARM_SIGN_EXTEND (remainder
                                        << (set_sign_bit_copies - 1))))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, new_src,
                                                   GEN_INT (temp1)));
                  emit_constant_insn (cond,
                                      gen_ashrsi3 (target, new_src,
                                                   GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
          /* For an inverted constant, we will need to set the low bits,
             these will be shifted out of harm's way.  */
          temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
          if (const_ok_for_arm (~temp1))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, new_src,
                                                   GEN_INT (temp1)));
                  emit_constant_insn (cond,
                                      gen_ashrsi3 (target, new_src,
                                                   GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
        }

      /* See if we can generate this by setting the bottom (or the top)
         16 bits, and then shifting these into the other half of the
         word.  We only look for the simplest cases, to do more would cost
         too much.  Be careful, however, not to generate this when the
         alternative would take fewer insns.  */
      if (val & 0xffff0000)
        {
          temp1 = remainder & 0xffff0000;
          temp2 = remainder & 0x0000ffff;

          /* Overlaps outside this range are best done using other methods.  */
          for (i = 9; i < 24; i++)
            {
              if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
                  && !const_ok_for_arm (temp2))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, cond, temp2, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_constant_insn
                      (cond,
                       gen_rtx_SET
                       (VOIDmode, target,
                        gen_rtx_IOR (mode,
                                     gen_rtx_ASHIFT (mode, source,
                                                     GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }

          /* Don't duplicate cases already considered.  */
          for (i = 17; i < 24; i++)
            {
              if (((temp1 | (temp1 >> i)) == remainder)
                  && !const_ok_for_arm (temp1))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, cond, temp1, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_constant_insn
                      (cond,
                       gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_IOR
                                    (mode,
                                     gen_rtx_LSHIFTRT (mode, source,
                                                       GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }
        }
      break;
    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
         single instruction, and we can find a temporary to put it in,
         then this can be done in two instructions instead of 3-4.  */
      if (subtargets
          /* TARGET can't be NULL if SUBTARGETS is 0 */
          || (reload_completed && !reg_mentioned_p (target, source)))
        {
          if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
            {
              if (generate)
                {
                  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, sub,
                                                   GEN_INT (val)));
                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, target,
                                                   gen_rtx_fmt_ee (code, mode,
                                                                   source, sub)));
                }
              return 2;
            }
        }

      if (code == XOR)
        break;

      if (set_sign_bit_copies > 8
          && (val & (-1 << (32 - set_sign_bit_copies))) == val)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_sign_bit_copies);

              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, sub,
                              gen_rtx_NOT (mode,
                                           gen_rtx_ASHIFT (mode,
                                                           source,
                                                           shift))));
              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, target,
                              gen_rtx_NOT (mode,
                                           gen_rtx_LSHIFTRT (mode, sub,
                                                             shift))));
            }
          return 2;
        }

      if (set_zero_bit_copies > 8
          && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_zero_bit_copies);

              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, sub,
                              gen_rtx_NOT (mode,
                                           gen_rtx_LSHIFTRT (mode,
                                                             source,
                                                             shift))));
              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, target,
                              gen_rtx_NOT (mode,
                                           gen_rtx_ASHIFT (mode, sub,
                                                           shift))));
            }
          return 2;
        }

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, sub,
                                               gen_rtx_NOT (mode, source)));
              source = sub;
              if (subtargets)
                sub = gen_reg_rtx (mode);
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, sub,
                                               gen_rtx_AND (mode, source,
                                                            GEN_INT (temp1))));
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, target,
                                               gen_rtx_NOT (mode, sub)));
            }
          return 3;
        }
      break;

    case AND:
      /* See if two shifts will do 2 or more insns' worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = ((0xffffffff
                                       << (32 - clear_sign_bit_copies))
                                      & 0xffffffff);

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_sign_bit_copies);

              emit_insn (gen_ashlsi3 (new_src, source, shift));
              emit_insn (gen_lshrsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;

                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_zero_bit_copies);

              emit_insn (gen_lshrsi3 (new_src, source, shift));
              emit_insn (gen_ashlsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      break;

    default:
      break;
    }
1911 for (i = 0; i < 32; i++)
1912 if (remainder & (1 << i))
1913 num_bits_set++;
1915 if (code == AND || (can_invert && num_bits_set > 16))
1916 remainder = (~remainder) & 0xffffffff;
1917 else if (code == PLUS && num_bits_set > 16)
1918 remainder = (-remainder) & 0xffffffff;
1919 else
1921 can_invert = 0;
1922 can_negate = 0;
1925 /* Now try and find a way of doing the job in either two or three
1926 instructions.
1927 We start by looking for the largest block of zeros that is aligned on
1928 a 2-bit boundary; we then fill up the temps, wrapping around to the
1929 top of the word when we drop off the bottom.
1930 In the worst case this code should produce no more than four insns. */
1932 int best_start = 0;
1933 int best_consecutive_zeros = 0;
1935 for (i = 0; i < 32; i += 2)
1937 int consecutive_zeros = 0;
1939 if (!(remainder & (3 << i)))
1941 while ((i < 32) && !(remainder & (3 << i)))
1943 consecutive_zeros += 2;
1944 i += 2;
1946 if (consecutive_zeros > best_consecutive_zeros)
1948 best_consecutive_zeros = consecutive_zeros;
1949 best_start = i - consecutive_zeros;
1951 i -= 2;
1955 /* So long as it won't require any more insns to do so, it's
1956 desirable to emit a small constant (in bits 0...9) in the last
1957 insn. This way there is more chance that it can be combined with
1958 a later addressing insn to form a pre-indexed load or store
1959 operation. Consider:
1961 *((volatile int *)0xe0000100) = 1;
1962 *((volatile int *)0xe0000110) = 2;
1964 We want this to wind up as:
1966 mov rA, #0xe0000000
1967 mov rB, #1
1968 str rB, [rA, #0x100]
1969 mov rB, #2
1970 str rB, [rA, #0x110]
1972 rather than having to synthesize both large constants from scratch.
1974 Therefore, we calculate how many insns would be required to emit
1975 the constant starting from `best_start', and also starting from
1976 zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
1977 yield a shorter sequence, we may as well use zero. */
1978 if (best_start != 0
1979 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1980 && (count_insns_for_constant (remainder, 0) <=
1981 count_insns_for_constant (remainder, best_start)))
1982 best_start = 0;
1984 /* Now start emitting the insns. */
1985 i = best_start;
1988 int end;
1990 if (i <= 0)
1991 i += 32;
1992 if (remainder & (3 << (i - 2)))
1994 end = i - 8;
1995 if (end < 0)
1996 end += 32;
1997 temp1 = remainder & ((0x0ff << end)
1998 | ((i < end) ? (0xff >> (32 - end)) : 0));
1999 remainder &= ~temp1;
2001 if (generate)
2003 rtx new_src, temp1_rtx;
2005 if (code == SET || code == MINUS)
2007 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2008 if (can_invert && code != MINUS)
2009 temp1 = ~temp1;
2011 else
2013 if (remainder && subtargets)
2014 new_src = gen_reg_rtx (mode);
2015 else
2016 new_src = target;
2017 if (can_invert)
2018 temp1 = ~temp1;
2019 else if (can_negate)
2020 temp1 = -temp1;
2023 temp1 = trunc_int_for_mode (temp1, mode);
2024 temp1_rtx = GEN_INT (temp1);
2026 if (code == SET)
2028 else if (code == MINUS)
2029 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2030 else
2031 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2033 emit_constant_insn (cond,
2034 gen_rtx_SET (VOIDmode, new_src,
2035 temp1_rtx));
2036 source = new_src;
2039 if (code == SET)
2041 can_invert = 0;
2042 code = PLUS;
2044 else if (code == MINUS)
2045 code = PLUS;
2047 insns++;
2048 i -= 6;
2050 i -= 2;
2052 while (remainder);
2055 return insns;
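/* Worked example (illustrative): 0x12345678 has no 8-bit rotated
   encoding, so a SET of it is built up chunk by chunk, e.g.

	mov	target, #0x12000000
	add	target, target, #0x00340000
	add	target, target, #0x00005600
	add	target, target, #0x00000078

   (the chunk boundaries the loop actually picks may differ, but each
   immediate is 8 bits at an even rotation and the count is still 4;
   note the switch from SET to PLUS after the first insn above).  */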
2058 /* Canonicalize a comparison so that we are more likely to recognize it.
2059 This can be done for a few constant compares, where we can make the
2060 immediate value easier to load. */
2062 enum rtx_code
2063 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2065 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2067 switch (code)
2069 case EQ:
2070 case NE:
2071 return code;
2073 case GT:
2074 case LE:
2075 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2076 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2078 *op1 = GEN_INT (i + 1);
2079 return code == GT ? GE : LT;
2081 break;
2083 case GE:
2084 case LT:
2085 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2086 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2088 *op1 = GEN_INT (i - 1);
2089 return code == GE ? GT : LE;
2091 break;
2093 case GTU:
2094 case LEU:
2095 if (i != ~((unsigned HOST_WIDE_INT) 0)
2096 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2098 *op1 = GEN_INT (i + 1);
2099 return code == GTU ? GEU : LTU;
2101 break;
2103 case GEU:
2104 case LTU:
2105 if (i != 0
2106 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2108 *op1 = GEN_INT (i - 1);
2109 return code == GEU ? GTU : LEU;
2111 break;
2113 default:
2114 abort ();
2117 return code;
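/* Illustrative: 0xfff is not an 8-bit rotated immediate, but 0x1000
   is, so (x > 0xfff) is rewritten above as (x >= 0x1000):

	cmp	r0, #0x1000
	bge	...

   saving the insns that loading #0xfff into a register would cost.  */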
2120 /* Decide whether a type should be returned in memory (true)
2121 or in a register (false). This is called by the macro
2122 RETURN_IN_MEMORY. */
2124 arm_return_in_memory (tree type)
2126 HOST_WIDE_INT size;
2128 if (!AGGREGATE_TYPE_P (type))
2129 /* All simple types are returned in registers. */
2130 return 0;
2132 size = int_size_in_bytes (type);
2134 if (arm_abi != ARM_ABI_APCS)
2136 /* ATPCS and later return aggregate types in memory only if they are
2137 larger than a word (or are variable size). */
2138 return (size < 0 || size > UNITS_PER_WORD);
2141 /* For the arm-wince targets we choose to be compatible with Microsoft's
2142 ARM and Thumb compilers, which always return aggregates in memory. */
2143 #ifndef ARM_WINCE
2144 /* All structures/unions bigger than one word are returned in memory.
2145 Also catch the case where int_size_in_bytes returns -1. In this case
2146 the aggregate is either huge or of variable size, and in either case
2147 we will want to return it via memory and not in a register. */
2148 if (size < 0 || size > UNITS_PER_WORD)
2149 return 1;
2151 if (TREE_CODE (type) == RECORD_TYPE)
2153 tree field;
2155 /* For a struct the APCS says that we only return in a register
2156 if the type is 'integer like' and every addressable element
2157 has an offset of zero. For practical purposes this means
2158 that the structure can have at most one non-bit-field element
2159 and that this element must be the first one in the structure. */
2161 /* Find the first field, ignoring non FIELD_DECL things which will
2162 have been created by C++. */
2163 for (field = TYPE_FIELDS (type);
2164 field && TREE_CODE (field) != FIELD_DECL;
2165 field = TREE_CHAIN (field))
2166 continue;
2168 if (field == NULL)
2169 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2171 /* Check that the first field is valid for returning in a register. */
2173 /* ... Floats are not allowed */
2174 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2175 return 1;
2177 /* ... Aggregates that are not themselves valid for returning in
2178 a register are not allowed. */
2179 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2180 return 1;
2182 /* Now check the remaining fields, if any. Only bitfields are allowed,
2183 since they are not addressable. */
2184 for (field = TREE_CHAIN (field);
2185 field;
2186 field = TREE_CHAIN (field))
2188 if (TREE_CODE (field) != FIELD_DECL)
2189 continue;
2191 if (!DECL_BIT_FIELD_TYPE (field))
2192 return 1;
2195 return 0;
2198 if (TREE_CODE (type) == UNION_TYPE)
2200 tree field;
2202 /* Unions can be returned in registers if every element is
2203 integral, or can be returned in an integer register. */
2204 for (field = TYPE_FIELDS (type);
2205 field;
2206 field = TREE_CHAIN (field))
2208 if (TREE_CODE (field) != FIELD_DECL)
2209 continue;
2211 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2212 return 1;
2214 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2215 return 1;
2218 return 0;
2220 #endif /* not ARM_WINCE */
2222 /* Return all other types in memory. */
2223 return 1;
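/* Examples under the APCS rules above (illustrative):
   struct { int i; } and struct { int a : 8; int b : 24; } come back
   in r0; struct { float f; }, struct { char c; int i; } (two
   addressable fields) and anything wider than a word go via
   memory.  */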
2226 /* Indicate whether or not words of a double are in big-endian order. */
2229 arm_float_words_big_endian (void)
2231 if (TARGET_MAVERICK)
2232 return 0;
2234 /* For FPA, float words are always big-endian.  For VFP, float words
2235 follow the memory system mode. */
2237 if (TARGET_FPA)
2239 return 1;
2242 if (TARGET_VFP)
2243 return (TARGET_BIG_END ? 1 : 0);
2245 return 1;
2248 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2249 for a call to a function whose data type is FNTYPE.
2250 For a library call, FNTYPE is NULL. */
2251 void
2252 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2253 rtx libname ATTRIBUTE_UNUSED,
2254 tree fndecl ATTRIBUTE_UNUSED)
2256 /* On the ARM, the offset starts at 0. */
2257 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2258 pcum->iwmmxt_nregs = 0;
2259 pcum->can_split = true;
2261 pcum->call_cookie = CALL_NORMAL;
2263 if (TARGET_LONG_CALLS)
2264 pcum->call_cookie = CALL_LONG;
2266 /* Check for long call/short call attributes. The attributes
2267 override any command line option. */
2268 if (fntype)
2270 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2271 pcum->call_cookie = CALL_SHORT;
2272 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2273 pcum->call_cookie = CALL_LONG;
2276 /* Varargs vectors are treated the same as long long.
2277 named_count avoids having to change the way arm handles 'named'.  */
2278 pcum->named_count = 0;
2279 pcum->nargs = 0;
2281 if (TARGET_REALLY_IWMMXT && fntype)
2283 tree fn_arg;
2285 for (fn_arg = TYPE_ARG_TYPES (fntype);
2286 fn_arg;
2287 fn_arg = TREE_CHAIN (fn_arg))
2288 pcum->named_count += 1;
2290 if (! pcum->named_count)
2291 pcum->named_count = INT_MAX;
2296 /* Return true if mode/type need doubleword alignment. */
2297 bool
2298 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2300 return (mode == DImode
2301 || mode == DFmode
2302 || VECTOR_MODE_SUPPORTED_P (mode)
2303 || (mode == BLKmode
2304 && TYPE_ALIGN (type) > PARM_BOUNDARY));
2308 /* Determine where to put an argument to a function.
2309 Value is zero to push the argument on the stack,
2310 or a hard register in which to store the argument.
2312 MODE is the argument's machine mode.
2313 TYPE is the data type of the argument (as a tree).
2314 This is null for libcalls where that information may
2315 not be available.
2316 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2317 the preceding args and about the function being called.
2318 NAMED is nonzero if this argument is a named parameter
2319 (otherwise it is an extra parameter matching an ellipsis). */
2322 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2323 tree type, int named)
2325 int nregs;
2327 /* Varargs vectors are treated the same as long long.
2328 named_count avoids having to change the way arm handles 'named'.  */
2329 if (TARGET_IWMMXT_ABI
2330 && VECTOR_MODE_SUPPORTED_P (mode)
2331 && pcum->named_count > pcum->nargs + 1)
2333 if (pcum->iwmmxt_nregs <= 9)
2334 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2335 else
2337 pcum->can_split = false;
2338 return NULL_RTX;
2342 /* Put doubleword aligned quantities in even register pairs. */
2343 if (pcum->nregs & 1
2344 && ARM_DOUBLEWORD_ALIGN
2345 && arm_needs_doubleword_align (mode, type))
2346 pcum->nregs++;
2348 if (mode == VOIDmode)
2349 /* Compute operand 2 of the call insn. */
2350 return GEN_INT (pcum->call_cookie);
2352 /* Only allow splitting an arg between regs and memory if all preceding
2353 args were allocated to regs. For args passed by reference we only count
2354 the reference pointer. */
2355 if (pcum->can_split)
2356 nregs = 1;
2357 else
2358 nregs = ARM_NUM_REGS2 (mode, type);
2360 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2361 return NULL_RTX;
2363 return gen_rtx_REG (mode, pcum->nregs);
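/* Illustrative: for f (int a, long long b) with ARM_DOUBLEWORD_ALIGN,
   a is passed in r0, b skips the odd register r1 and occupies r2/r3,
   and any further argument goes on the stack.  */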
2366 /* Variable sized types are passed by reference. This is a GCC
2367 extension to the ARM ABI. */
2370 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2371 enum machine_mode mode ATTRIBUTE_UNUSED,
2372 tree type, int named ATTRIBUTE_UNUSED)
2374 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2377 /* Implement va_arg. */
2380 arm_va_arg (tree valist, tree type)
2382 int align;
2384 /* Variable sized types are passed by reference. */
2385 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2387 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2388 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2391 align = FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), type);
2392 if (align > PARM_BOUNDARY)
2394 tree mask;
2395 tree t;
2397 /* Maintain 64-bit alignment of the valist pointer by
2398 constructing: valist = ((valist + (8 - 1)) & -8). */
2399 mask = build_int_2 (- (align / BITS_PER_UNIT), -1);
2400 t = build_int_2 ((align / BITS_PER_UNIT) - 1, 0);
2401 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2402 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, mask);
2403 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2404 TREE_SIDE_EFFECTS (t) = 1;
2405 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2407 /* This is to stop the combine pass optimizing
2408 away the alignment adjustment. */
2409 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2412 return std_expand_builtin_va_arg (valist, type);
2415 /* Encode the current state of the #pragma [no_]long_calls. */
2416 typedef enum
2418 OFF, /* No #pragma [no_]long_calls is in effect. */
2419 LONG, /* #pragma long_calls is in effect. */
2420 SHORT /* #pragma no_long_calls is in effect. */
2421 } arm_pragma_enum;
2423 static arm_pragma_enum arm_pragma_long_calls = OFF;
2425 void
2426 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2428 arm_pragma_long_calls = LONG;
2431 void
2432 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2434 arm_pragma_long_calls = SHORT;
2437 void
2438 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2440 arm_pragma_long_calls = OFF;
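/* Typical use (illustrative):

	#pragma long_calls
	void far_away (void);
	#pragma long_calls_off

   Function types declared between the pragmas pick up the long_call
   attribute via arm_set_default_type_attributes below.  */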
2443 /* Table of machine attributes. */
2444 const struct attribute_spec arm_attribute_table[] =
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Function calls made to this symbol must be done indirectly, because
2448 it may lie outside of the 26 bit addressing range of a normal function
2449 call. */
2450 { "long_call", 0, 0, false, true, true, NULL },
2451 /* Whereas these functions are always known to reside within the 26 bit
2452 addressing range. */
2453 { "short_call", 0, 0, false, true, true, NULL },
2454 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2455 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2456 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2457 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2458 #ifdef ARM_PE
2459 /* ARM/PE has three new attributes:
2460 interfacearm - ?
2461 dllexport - for exporting a function/variable that will live in a dll
2462 dllimport - for importing a function/variable from a dll
2464 Microsoft allows multiple declspecs in one __declspec, separating
2465 them with spaces. We do NOT support this. Instead, use __declspec
2466 multiple times.  */
2468 { "dllimport", 0, 0, true, false, false, NULL },
2469 { "dllexport", 0, 0, true, false, false, NULL },
2470 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2471 #endif
2472 { NULL, 0, 0, false, false, false, NULL }
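/* Illustrative uses of the table above:

	void far_away (void) __attribute__ ((long_call));
	void fiq_handler (void) __attribute__ ((interrupt ("FIQ")));  */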
2475 /* Handle an attribute requiring a FUNCTION_DECL;
2476 arguments as in struct attribute_spec.handler. */
2477 static tree
2478 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2479 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2481 if (TREE_CODE (*node) != FUNCTION_DECL)
2483 warning ("`%s' attribute only applies to functions",
2484 IDENTIFIER_POINTER (name));
2485 *no_add_attrs = true;
2488 return NULL_TREE;
2491 /* Handle an "interrupt" or "isr" attribute;
2492 arguments as in struct attribute_spec.handler. */
2493 static tree
2494 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2495 bool *no_add_attrs)
2497 if (DECL_P (*node))
2499 if (TREE_CODE (*node) != FUNCTION_DECL)
2501 warning ("`%s' attribute only applies to functions",
2502 IDENTIFIER_POINTER (name));
2503 *no_add_attrs = true;
2505 /* FIXME: the argument if any is checked for type attributes;
2506 should it be checked for decl ones? */
2508 else
2510 if (TREE_CODE (*node) == FUNCTION_TYPE
2511 || TREE_CODE (*node) == METHOD_TYPE)
2513 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2515 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2516 *no_add_attrs = true;
2519 else if (TREE_CODE (*node) == POINTER_TYPE
2520 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2521 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2522 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2524 *node = build_type_copy (*node);
2525 TREE_TYPE (*node) = build_type_attribute_variant
2526 (TREE_TYPE (*node),
2527 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2528 *no_add_attrs = true;
2530 else
2532 /* Possibly pass this attribute on from the type to a decl. */
2533 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2534 | (int) ATTR_FLAG_FUNCTION_NEXT
2535 | (int) ATTR_FLAG_ARRAY_NEXT))
2537 *no_add_attrs = true;
2538 return tree_cons (name, args, NULL_TREE);
2540 else
2542 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2547 return NULL_TREE;
2550 /* Return 0 if the attributes for two types are incompatible, 1 if they
2551 are compatible, and 2 if they are nearly compatible (which causes a
2552 warning to be generated). */
2553 static int
2554 arm_comp_type_attributes (tree type1, tree type2)
2556 int l1, l2, s1, s2;
2558 /* Check for mismatch of non-default calling convention. */
2559 if (TREE_CODE (type1) != FUNCTION_TYPE)
2560 return 1;
2562 /* Check for mismatched call attributes. */
2563 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2564 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2565 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2566 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2568 /* Only bother to check if an attribute is defined. */
2569 if (l1 | l2 | s1 | s2)
2571 /* If one type has an attribute, the other must have the same attribute. */
2572 if ((l1 != l2) || (s1 != s2))
2573 return 0;
2575 /* Disallow mixed attributes. */
2576 if ((l1 & s2) || (l2 & s1))
2577 return 0;
2580 /* Check for mismatched ISR attribute. */
2581 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2582 if (! l1)
2583 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2584 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2585 if (! l2)
2586 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2587 if (l1 != l2)
2588 return 0;
2590 return 1;
2593 /* Encode long_call or short_call attribute by prefixing
2594 symbol name in DECL with a special character FLAG. */
2595 void
2596 arm_encode_call_attribute (tree decl, int flag)
2598 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2599 int len = strlen (str);
2600 char * newstr;
2602 /* Do not allow weak functions to be treated as short call. */
2603 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2604 return;
2606 newstr = alloca (len + 2);
2607 newstr[0] = flag;
2608 strcpy (newstr + 1, str);
2610 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2611 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
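/* I.e. the assembler name is rewritten from "foo" to "<FLAG>foo".
   current_file_function_operand and arm_is_longcall_p below detect
   the prefix with ENCODED_SHORT_CALL_ATTR_P and
   ENCODED_LONG_CALL_ATTR_P.  */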
2614 /* Assigns default attributes to newly defined type. This is used to
2615 set short_call/long_call attributes for function types of
2616 functions defined inside corresponding #pragma scopes. */
2617 static void
2618 arm_set_default_type_attributes (tree type)
2620 /* Add __attribute__ ((long_call)) to all functions, when
2621 inside #pragma long_calls or __attribute__ ((short_call)),
2622 when inside #pragma no_long_calls. */
2623 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2625 tree type_attr_list, attr_name;
2626 type_attr_list = TYPE_ATTRIBUTES (type);
2628 if (arm_pragma_long_calls == LONG)
2629 attr_name = get_identifier ("long_call");
2630 else if (arm_pragma_long_calls == SHORT)
2631 attr_name = get_identifier ("short_call");
2632 else
2633 return;
2635 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2636 TYPE_ATTRIBUTES (type) = type_attr_list;
2640 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2641 defined within the current compilation unit. If this cannot be
2642 determined, then 0 is returned. */
2643 static int
2644 current_file_function_operand (rtx sym_ref)
2646 /* This is a bit of a fib. A function will have a short call flag
2647 applied to its name if it has the short call attribute, or it has
2648 already been defined within the current compilation unit. */
2649 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2650 return 1;
2652 /* The current function is always defined within the current compilation
2653 unit.  If it is a weak definition, however, then this may not be the real
2654 definition of the function, and so we have to say no. */
2655 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2656 && !DECL_WEAK (current_function_decl))
2657 return 1;
2659 /* We cannot make the determination - default to returning 0. */
2660 return 0;
2663 /* Return nonzero if a 32 bit "long_call" should be generated for
2664 this call. We generate a long_call if the function:
2666 a. has an __attribute__ ((long_call))
2667 or b. is within the scope of a #pragma long_calls
2668 or c. the -mlong-calls command line switch has been specified
2670 However we do not generate a long call if the function:
2672 d. has an __attribute__ ((short_call))
2673 or e. is inside the scope of a #pragma no_long_calls
2674 or f. has an __attribute__ ((section))
2675 or g. is defined within the current compilation unit.
2677 This function will be called by C fragments contained in the machine
2678 description file. CALL_REF and CALL_COOKIE correspond to the matched
2679 rtl operands. CALL_SYMBOL is used to distinguish between
2680 two different callers of the function. It is set to 1 in the
2681 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2682 and "call_value" patterns. This is because of the difference in the
2683 SYM_REFs passed by these patterns. */
2685 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2687 if (!call_symbol)
2689 if (GET_CODE (sym_ref) != MEM)
2690 return 0;
2692 sym_ref = XEXP (sym_ref, 0);
2695 if (GET_CODE (sym_ref) != SYMBOL_REF)
2696 return 0;
2698 if (call_cookie & CALL_SHORT)
2699 return 0;
2701 if (TARGET_LONG_CALLS && flag_function_sections)
2702 return 1;
2704 if (current_file_function_operand (sym_ref))
2705 return 0;
2707 return (call_cookie & CALL_LONG)
2708 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2709 || TARGET_LONG_CALLS;
2712 /* Return nonzero if it is ok to make a tail-call to DECL. */
2713 static bool
2714 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2716 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2718 if (cfun->machine->sibcall_blocked)
2719 return false;
2721 /* Never tailcall something for which we have no decl, or if we
2722 are in Thumb mode. */
2723 if (decl == NULL || TARGET_THUMB)
2724 return false;
2726 /* Get the calling method. */
2727 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2728 call_type = CALL_SHORT;
2729 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2730 call_type = CALL_LONG;
2732 /* Cannot tail-call to long calls, since these are out of range of
2733 a branch instruction. However, if not compiling PIC, we know
2734 we can reach the symbol if it is in this compilation unit. */
2735 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2736 return false;
2738 /* If we are interworking and the function is not declared static
2739 then we can't tail-call it unless we know that it exists in this
2740 compilation unit (since it might be a Thumb routine). */
2741 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2742 return false;
2744 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2745 if (IS_INTERRUPT (arm_current_func_type ()))
2746 return false;
2748 /* Everything else is ok. */
2749 return true;
2753 /* Addressing mode support functions. */
2755 /* Return nonzero if X is a legitimate immediate operand when compiling
2756 for PIC. */
2758 legitimate_pic_operand_p (rtx x)
2760 if (CONSTANT_P (x)
2761 && flag_pic
2762 && (GET_CODE (x) == SYMBOL_REF
2763 || (GET_CODE (x) == CONST
2764 && GET_CODE (XEXP (x, 0)) == PLUS
2765 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2766 return 0;
2768 return 1;
2772 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2774 if (GET_CODE (orig) == SYMBOL_REF
2775 || GET_CODE (orig) == LABEL_REF)
2777 #ifndef AOF_ASSEMBLER
2778 rtx pic_ref, address;
2779 #endif
2780 rtx insn;
2781 int subregs = 0;
2783 if (reg == 0)
2785 if (no_new_pseudos)
2786 abort ();
2787 else
2788 reg = gen_reg_rtx (Pmode);
2790 subregs = 1;
2793 #ifdef AOF_ASSEMBLER
2794 /* The AOF assembler can generate relocations for these directly, and
2795 understands that the PIC register has to be added into the offset. */
2796 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2797 #else
2798 if (subregs)
2799 address = gen_reg_rtx (Pmode);
2800 else
2801 address = reg;
2803 if (TARGET_ARM)
2804 emit_insn (gen_pic_load_addr_arm (address, orig));
2805 else
2806 emit_insn (gen_pic_load_addr_thumb (address, orig));
2808 if ((GET_CODE (orig) == LABEL_REF
2809 || (GET_CODE (orig) == SYMBOL_REF &&
2810 SYMBOL_REF_LOCAL_P (orig)))
2811 && NEED_GOT_RELOC)
2812 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2813 else
2815 pic_ref = gen_rtx_MEM (Pmode,
2816 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2817 address));
2818 RTX_UNCHANGING_P (pic_ref) = 1;
2821 insn = emit_move_insn (reg, pic_ref);
2822 #endif
2823 current_function_uses_pic_offset_table = 1;
2824 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2825 by the loop optimizer.  */
2826 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2827 REG_NOTES (insn));
2828 return reg;
2830 else if (GET_CODE (orig) == CONST)
2832 rtx base, offset;
2834 if (GET_CODE (XEXP (orig, 0)) == PLUS
2835 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2836 return orig;
2838 if (reg == 0)
2840 if (no_new_pseudos)
2841 abort ();
2842 else
2843 reg = gen_reg_rtx (Pmode);
2846 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2848 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2849 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2850 base == reg ? 0 : reg);
2852 else
2853 abort ();
2855 if (GET_CODE (offset) == CONST_INT)
2857 /* The base register doesn't really matter; we only want to
2858 test the index for the appropriate mode. */
2859 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2861 if (!no_new_pseudos)
2862 offset = force_reg (Pmode, offset);
2863 else
2864 abort ();
2867 if (GET_CODE (offset) == CONST_INT)
2868 return plus_constant (base, INTVAL (offset));
2871 if (GET_MODE_SIZE (mode) > 4
2872 && (GET_MODE_CLASS (mode) == MODE_INT
2873 || TARGET_SOFT_FLOAT))
2875 emit_insn (gen_addsi3 (reg, base, offset));
2876 return reg;
2879 return gen_rtx_PLUS (Pmode, base, offset);
2882 return orig;
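/* Illustrative: under -fpic a load of a non-local symbol 'x' becomes
   roughly

	ldr	rA, .Lpool	@ GOT offset of x
	ldr	rA, [sl, rA]	@ fetch &x from the GOT

   (the PIC register is normally sl); local symbols and label refs
   skip the GOT load and simply add the PIC register to the offset.  */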
2885 /* Generate code to load the PIC register. PROLOGUE is true if
2886 called from arm_expand_prologue (in which case we want the
2887 generated insns at the start of the function); false if called
2888 by an exception receiver that needs the PIC register reloaded
2889 (in which case the insns are just dumped at the current location). */
2890 void
2891 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2893 #ifndef AOF_ASSEMBLER
2894 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2895 rtx global_offset_table;
2897 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2898 return;
2900 if (!flag_pic)
2901 abort ();
2903 start_sequence ();
2904 l1 = gen_label_rtx ();
2906 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2907 /* On the ARM the PC register contains 'dot + 8' at the time of the
2908 addition; on the Thumb it is 'dot + 4'. */
2909 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2910 if (GOT_PCREL)
2911 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2912 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2913 else
2914 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2916 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2918 if (TARGET_ARM)
2920 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2921 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2923 else
2925 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2926 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2929 seq = get_insns ();
2930 end_sequence ();
2931 if (prologue)
2932 emit_insn_after (seq, get_insns ());
2933 else
2934 emit_insn (seq);
2936 /* Need to emit this whether or not we obey regdecls,
2937 since setjmp/longjmp can cause life info to screw up. */
2938 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2939 #endif /* AOF_ASSEMBLER */
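/* The sequence emitted above is roughly (ARM state, illustrative):

	ldr	sl, .Loffset	@ _GLOBAL_OFFSET_TABLE_ - (.Lpic + 8)
   .Lpic:
	add	sl, pc, sl	@ pc reads as .Lpic + 8 here

   leaving the GOT address in the PIC register wherever the code was
   loaded; the Thumb variant adjusts by dot + 4 instead.  */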
2942 /* Return nonzero if X is valid as an ARM state addressing register. */
2943 static int
2944 arm_address_register_rtx_p (rtx x, int strict_p)
2946 int regno;
2948 if (GET_CODE (x) != REG)
2949 return 0;
2951 regno = REGNO (x);
2953 if (strict_p)
2954 return ARM_REGNO_OK_FOR_BASE_P (regno);
2956 return (regno <= LAST_ARM_REGNUM
2957 || regno >= FIRST_PSEUDO_REGISTER
2958 || regno == FRAME_POINTER_REGNUM
2959 || regno == ARG_POINTER_REGNUM);
2962 /* Return nonzero if X is a valid ARM state address operand. */
2964 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
2965 int strict_p)
2967 if (arm_address_register_rtx_p (x, strict_p))
2968 return 1;
2970 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2971 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2973 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2974 && GET_MODE_SIZE (mode) <= 4
2975 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2976 && GET_CODE (XEXP (x, 1)) == PLUS
2977 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2978 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), outer,
2979 strict_p);
2981 /* After reload constants split into minipools will have addresses
2982 from a LABEL_REF. */
2983 else if (reload_completed
2984 && (GET_CODE (x) == LABEL_REF
2985 || (GET_CODE (x) == CONST
2986 && GET_CODE (XEXP (x, 0)) == PLUS
2987 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2988 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2989 return 1;
2991 else if (mode == TImode)
2992 return 0;
2994 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2996 if (GET_CODE (x) == PLUS
2997 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2998 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3000 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
3002 if (val == 4 || val == -4 || val == -8)
3003 return 1;
3007 else if (TARGET_HARD_FLOAT && TARGET_VFP && mode == DFmode)
3009 if (GET_CODE (x) == PLUS
3010 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3011 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3013 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
3015 /* ??? valid arm offsets are a subset of VFP offsets.
3016 For now only allow this subset. Proper fix is to add an
3017 additional memory constraint for arm address modes.
3018 Alternatively allow full vfp addressing and let
3019 output_move_double fix it up with a sub-optimal sequence. */
3020 if (val == 4 || val == -4 || val == -8)
3021 return 1;
3025 else if (GET_CODE (x) == PLUS)
3027 rtx xop0 = XEXP (x, 0);
3028 rtx xop1 = XEXP (x, 1);
3030 return ((arm_address_register_rtx_p (xop0, strict_p)
3031 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3032 || (arm_address_register_rtx_p (xop1, strict_p)
3033 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3036 #if 0
3037 /* Reload currently can't handle MINUS, so disable this for now */
3038 else if (GET_CODE (x) == MINUS)
3040 rtx xop0 = XEXP (x, 0);
3041 rtx xop1 = XEXP (x, 1);
3043 return (arm_address_register_rtx_p (xop0, strict_p)
3044 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3046 #endif
3048 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3049 && GET_CODE (x) == SYMBOL_REF
3050 && CONSTANT_POOL_ADDRESS_P (x)
3051 && ! (flag_pic
3052 && symbol_mentioned_p (get_pool_constant (x))))
3053 return 1;
3055 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
3056 && (GET_MODE_SIZE (mode) <= 4)
3057 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
3058 return 1;
3060 return 0;
3063 /* Return nonzero if INDEX is valid for an address index operand in
3064 ARM state. */
3065 static int
3066 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3067 int strict_p)
3069 HOST_WIDE_INT range;
3070 enum rtx_code code = GET_CODE (index);
3072 if (TARGET_HARD_FLOAT && TARGET_FPA && GET_MODE_CLASS (mode) == MODE_FLOAT)
3073 return (code == CONST_INT && INTVAL (index) < 1024
3074 && INTVAL (index) > -1024
3075 && (INTVAL (index) & 3) == 0);
3077 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
3078 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
3079 return (code == CONST_INT
3080 && INTVAL (index) < 255
3081 && INTVAL (index) > -255);
3083 if (arm_address_register_rtx_p (index, strict_p)
3084 && GET_MODE_SIZE (mode) <= 4)
3085 return 1;
3087 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3088 return (code == CONST_INT
3089 && INTVAL (index) < 256
3090 && INTVAL (index) > -256);
3092 if (GET_MODE_SIZE (mode) <= 4
3093 && ! (arm_arch4
3094 && (mode == HImode
3095 || (mode == QImode && outer == SIGN_EXTEND))))
3097 if (code == MULT)
3099 rtx xiop0 = XEXP (index, 0);
3100 rtx xiop1 = XEXP (index, 1);
3102 return ((arm_address_register_rtx_p (xiop0, strict_p)
3103 && power_of_two_operand (xiop1, SImode))
3104 || (arm_address_register_rtx_p (xiop1, strict_p)
3105 && power_of_two_operand (xiop0, SImode)));
3107 else if (code == LSHIFTRT || code == ASHIFTRT
3108 || code == ASHIFT || code == ROTATERT)
3110 rtx op = XEXP (index, 1);
3112 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3113 && GET_CODE (op) == CONST_INT
3114 && INTVAL (op) > 0
3115 && INTVAL (op) <= 31);
3119 /* For ARM v4 we may be doing a sign-extend operation during the
3120 load. */
3121 if (arm_arch4)
3123 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3124 range = 256;
3125 else
3126 range = 4096;
3128 else
3129 range = (mode == HImode) ? 4095 : 4096;
3131 return (code == CONST_INT
3132 && INTVAL (index) < range
3133 && INTVAL (index) > -range);
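/* Examples for SImode (illustrative): [r0, #4095] and
   [r0, r1, lsl #2] pass the tests above, while [r0, #4096] does not;
   with arm_arch4 an HImode reference is limited to the ldrh range,
   e.g. [r0, #255].  */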
3136 /* Return nonzero if X is valid as a Thumb state base register. */
3137 static int
3138 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3140 int regno;
3142 if (GET_CODE (x) != REG)
3143 return 0;
3145 regno = REGNO (x);
3147 if (strict_p)
3148 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3150 return (regno <= LAST_LO_REGNUM
3151 || regno > LAST_VIRTUAL_REGISTER
3152 || regno == FRAME_POINTER_REGNUM
3153 || (GET_MODE_SIZE (mode) >= 4
3154 && (regno == STACK_POINTER_REGNUM
3155 || regno >= FIRST_PSEUDO_REGISTER
3156 || x == hard_frame_pointer_rtx
3157 || x == arg_pointer_rtx)));
3160 /* Return nonzero if x is a legitimate index register. This is the case
3161 for any base register that can access a QImode object. */
3162 inline static int
3163 thumb_index_register_rtx_p (rtx x, int strict_p)
3165 return thumb_base_register_rtx_p (x, QImode, strict_p);
3168 /* Return nonzero if x is a legitimate Thumb-state address.
3170 The AP may be eliminated to either the SP or the FP, so we use the
3171 least common denominator, e.g. SImode, and offsets from 0 to 64.
3173 ??? Verify whether the above is the right approach.
3175 ??? Also, the FP may be eliminated to the SP, so perhaps that
3176 needs special handling also.
3178 ??? Look at how the mips16 port solves this problem. It probably uses
3179 better ways to solve some of these problems.
3181 Although it is not incorrect, we don't accept QImode and HImode
3182 addresses based on the frame pointer or arg pointer until the
3183 reload pass starts. This is so that eliminating such addresses
3184 into stack based ones won't produce impossible code. */
3186 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3188 /* ??? Not clear if this is right. Experiment. */
3189 if (GET_MODE_SIZE (mode) < 4
3190 && !(reload_in_progress || reload_completed)
3191 && (reg_mentioned_p (frame_pointer_rtx, x)
3192 || reg_mentioned_p (arg_pointer_rtx, x)
3193 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3194 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3195 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3196 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3197 return 0;
3199 /* Accept any base register. SP only in SImode or larger. */
3200 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3201 return 1;
3203 /* This is PC relative data before arm_reorg runs. */
3204 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3205 && GET_CODE (x) == SYMBOL_REF
3206 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3207 return 1;
3209 /* This is PC relative data after arm_reorg runs. */
3210 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3211 && (GET_CODE (x) == LABEL_REF
3212 || (GET_CODE (x) == CONST
3213 && GET_CODE (XEXP (x, 0)) == PLUS
3214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3216 return 1;
3218 /* Post-inc indexing only supported for SImode and larger. */
3219 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3220 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3221 return 1;
3223 else if (GET_CODE (x) == PLUS)
3225 /* REG+REG address can be any two index registers. */
3226 /* We disallow FRAME+REG addressing since we know that FRAME
3227 will be replaced with STACK, and SP relative addressing only
3228 permits SP+OFFSET. */
3229 if (GET_MODE_SIZE (mode) <= 4
3230 && XEXP (x, 0) != frame_pointer_rtx
3231 && XEXP (x, 1) != frame_pointer_rtx
3232 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3233 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3234 return 1;
3236 /* REG+const has 5-7 bit offset for non-SP registers. */
3237 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3238 || XEXP (x, 0) == arg_pointer_rtx)
3239 && GET_CODE (XEXP (x, 1)) == CONST_INT
3240 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3241 return 1;
3243 /* REG+const has 10 bit offset for SP, but only SImode and
3244 larger is supported. */
3245 /* ??? Should probably check for DI/DFmode overflow here
3246 just like GO_IF_LEGITIMATE_OFFSET does. */
3247 else if (GET_CODE (XEXP (x, 0)) == REG
3248 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3249 && GET_MODE_SIZE (mode) >= 4
3250 && GET_CODE (XEXP (x, 1)) == CONST_INT
3251 && INTVAL (XEXP (x, 1)) >= 0
3252 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3253 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3254 return 1;
3256 else if (GET_CODE (XEXP (x, 0)) == REG
3257 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3258 && GET_MODE_SIZE (mode) >= 4
3259 && GET_CODE (XEXP (x, 1)) == CONST_INT
3260 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3261 return 1;
3264 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3265 && GET_MODE_SIZE (mode) == 4
3266 && GET_CODE (x) == SYMBOL_REF
3267 && CONSTANT_POOL_ADDRESS_P (x)
3268 && !(flag_pic
3269 && symbol_mentioned_p (get_pool_constant (x))))
3270 return 1;
3272 return 0;
3275 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3276 instruction of mode MODE. */
3278 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3280 switch (GET_MODE_SIZE (mode))
3282 case 1:
3283 return val >= 0 && val < 32;
3285 case 2:
3286 return val >= 0 && val < 64 && (val & 1) == 0;
3288 default:
3289 return (val >= 0
3290 && (val + GET_MODE_SIZE (mode)) <= 128
3291 && (val & 3) == 0);
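/* These limits mirror the Thumb load/store encodings (illustrative):
   ldrb accepts #0..#31, ldrh #0..#62 in steps of 2, and ldr/str
   #0..#124 in steps of 4.  */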
3295 /* Try machine-dependent ways of modifying an illegitimate address
3296 to be legitimate. If we find one, return the new, valid address. */
3298 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3300 if (GET_CODE (x) == PLUS)
3302 rtx xop0 = XEXP (x, 0);
3303 rtx xop1 = XEXP (x, 1);
3305 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3306 xop0 = force_reg (SImode, xop0);
3308 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3309 xop1 = force_reg (SImode, xop1);
3311 if (ARM_BASE_REGISTER_RTX_P (xop0)
3312 && GET_CODE (xop1) == CONST_INT)
3314 HOST_WIDE_INT n, low_n;
3315 rtx base_reg, val;
3316 n = INTVAL (xop1);
3318 /* VFP addressing modes actually allow greater offsets, but for
3319 now we just stick with the lowest common denominator. */
3320 if (mode == DImode
3321 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3323 low_n = n & 0x0f;
3324 n &= ~0x0f;
3325 if (low_n > 4)
3327 n += 16;
3328 low_n -= 16;
3331 else
3333 low_n = ((mode) == TImode ? 0
3334 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3335 n -= low_n;
3338 base_reg = gen_reg_rtx (SImode);
3339 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3340 GEN_INT (n)), NULL_RTX);
3341 emit_move_insn (base_reg, val);
3342 x = (low_n == 0 ? base_reg
3343 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3345 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3346 x = gen_rtx_PLUS (SImode, xop0, xop1);
3349 /* XXX We don't allow MINUS any more -- see comment in
3350 arm_legitimate_address_p (). */
3351 else if (GET_CODE (x) == MINUS)
3353 rtx xop0 = XEXP (x, 0);
3354 rtx xop1 = XEXP (x, 1);
3356 if (CONSTANT_P (xop0))
3357 xop0 = force_reg (SImode, xop0);
3359 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3360 xop1 = force_reg (SImode, xop1);
3362 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3363 x = gen_rtx_MINUS (SImode, xop0, xop1);
3366 if (flag_pic)
3368 /* We need to find and carefully transform any SYMBOL and LABEL
3369 references; so go back to the original address expression. */
3370 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3372 if (new_x != orig_x)
3373 x = new_x;
3376 return x;
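/* Illustrative: legitimizing r1 + 0x3456 for SImode splits the offset
   at 12 bits:

	add	rB, r1, #0x3000
	...	[rB, #0x456]

   biasing the out-of-range part into a new base register.  */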
3380 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3381 to be legitimate. If we find one, return the new, valid address. */
3383 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3385 if (GET_CODE (x) == PLUS
3386 && GET_CODE (XEXP (x, 1)) == CONST_INT
3387 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3388 || INTVAL (XEXP (x, 1)) < 0))
3390 rtx xop0 = XEXP (x, 0);
3391 rtx xop1 = XEXP (x, 1);
3392 HOST_WIDE_INT offset = INTVAL (xop1);
3394 /* Try and fold the offset into a biasing of the base register and
3395 then offsetting that. Don't do this when optimizing for space
3396 since it can cause too many CSEs. */
3397 if (optimize_size && offset >= 0
3398 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3400 HOST_WIDE_INT delta;
3402 if (offset >= 256)
3403 delta = offset - (256 - GET_MODE_SIZE (mode));
3404 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3405 delta = 31 * GET_MODE_SIZE (mode);
3406 else
3407 delta = offset & (~31 * GET_MODE_SIZE (mode));
3409 xop0 = force_operand (plus_constant (xop0, offset - delta),
3410 NULL_RTX);
3411 x = plus_constant (xop0, delta);
3413 else if (offset < 0 && offset > -256)
3414 /* Small negative offsets are best done with a subtract before the
3415 dereference; forcing these into a register normally takes two
3416 instructions. */
3417 x = force_operand (x, NULL_RTX);
3418 else
3420 /* For the remaining cases, force the constant into a register. */
3421 xop1 = force_reg (SImode, xop1);
3422 x = gen_rtx_PLUS (SImode, xop0, xop1);
3425 else if (GET_CODE (x) == PLUS
3426 && s_register_operand (XEXP (x, 1), SImode)
3427 && !s_register_operand (XEXP (x, 0), SImode))
3429 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3431 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3434 if (flag_pic)
3436 /* We need to find and carefully transform any SYMBOL and LABEL
3437 references; so go back to the original address expression. */
3438 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3440 if (new_x != orig_x)
3441 x = new_x;
3444 return x;
3449 #define REG_OR_SUBREG_REG(X) \
3450 (GET_CODE (X) == REG \
3451 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3453 #define REG_OR_SUBREG_RTX(X) \
3454 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3456 #ifndef COSTS_N_INSNS
3457 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3458 #endif
3459 static inline int
3460 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3462 enum machine_mode mode = GET_MODE (x);
3464 switch (code)
3466 case ASHIFT:
3467 case ASHIFTRT:
3468 case LSHIFTRT:
3469 case ROTATERT:
3470 case PLUS:
3471 case MINUS:
3472 case COMPARE:
3473 case NEG:
3474 case NOT:
3475 return COSTS_N_INSNS (1);
3477 case MULT:
3478 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3480 int cycles = 0;
3481 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3483 while (i)
3485 i >>= 2;
3486 cycles++;
3488 return COSTS_N_INSNS (2) + cycles;
3490 return COSTS_N_INSNS (1) + 16;
3492 case SET:
3493 return (COSTS_N_INSNS (1)
3494 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3495 + (GET_CODE (SET_DEST (x)) == MEM)));
3497 case CONST_INT:
3498 if (outer == SET)
3500 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3501 return 0;
3502 if (thumb_shiftable_const (INTVAL (x)))
3503 return COSTS_N_INSNS (2);
3504 return COSTS_N_INSNS (3);
3506 else if ((outer == PLUS || outer == COMPARE)
3507 && INTVAL (x) < 256 && INTVAL (x) > -256)
3508 return 0;
3509 else if (outer == AND
3510 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3511 return COSTS_N_INSNS (1);
3512 else if (outer == ASHIFT || outer == ASHIFTRT
3513 || outer == LSHIFTRT)
3514 return 0;
3515 return COSTS_N_INSNS (2);
3517 case CONST:
3518 case CONST_DOUBLE:
3519 case LABEL_REF:
3520 case SYMBOL_REF:
3521 return COSTS_N_INSNS (3);
3523 case UDIV:
3524 case UMOD:
3525 case DIV:
3526 case MOD:
3527 return 100;
3529 case TRUNCATE:
3530 return 99;
3532 case AND:
3533 case XOR:
3534 case IOR:
3535 /* XXX guess. */
3536 return 8;
3538 case ADDRESSOF:
3539 case MEM:
3540 /* XXX another guess. */
3541 /* Memory costs quite a lot for the first word, but subsequent words
3542 load at the equivalent of a single insn each. */
3543 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3544 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3545 ? 4 : 0));
3547 case IF_THEN_ELSE:
3548 /* XXX a guess. */
3549 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3550 return 14;
3551 return 2;
3553 case ZERO_EXTEND:
3554 /* XXX still guessing. */
3555 switch (GET_MODE (XEXP (x, 0)))
3557 case QImode:
3558 return (1 + (mode == DImode ? 4 : 0)
3559 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3561 case HImode:
3562 return (4 + (mode == DImode ? 4 : 0)
3563 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3565 case SImode:
3566 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3568 default:
3569 return 99;
3572 default:
3573 return 99;
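/* Illustrative: the MULT case above charges COSTS_N_INSNS (2) plus
   one unit per two bits of the constant scanned, so a multiply by
   0x14 (binary 10100) costs COSTS_N_INSNS (2) + 3.  */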
3578 /* Worker routine for arm_rtx_costs. */
3579 static inline int
3580 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3582 enum machine_mode mode = GET_MODE (x);
3583 enum rtx_code subcode;
3584 int extra_cost;
3586 switch (code)
3588 case MEM:
3589 /* Memory costs quite a lot for the first word, but subsequent words
3590 load at the equivalent of a single insn each. */
3591 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3592 + (GET_CODE (x) == SYMBOL_REF
3593 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3595 case DIV:
3596 case MOD:
3597 case UDIV:
3598 case UMOD:
3599 return optimize_size ? COSTS_N_INSNS (2) : 100;
3601 case ROTATE:
3602 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3603 return 4;
3604 /* Fall through */
3605 case ROTATERT:
3606 if (mode != SImode)
3607 return 8;
3608 /* Fall through */
3609 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3610 if (mode == DImode)
3611 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3612 + ((GET_CODE (XEXP (x, 0)) == REG
3613 || (GET_CODE (XEXP (x, 0)) == SUBREG
3614 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3615 ? 0 : 8));
3616 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3617 || (GET_CODE (XEXP (x, 0)) == SUBREG
3618 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3619 ? 0 : 4)
3620 + ((GET_CODE (XEXP (x, 1)) == REG
3621 || (GET_CODE (XEXP (x, 1)) == SUBREG
3622 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3623 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3624 ? 0 : 4));
3626 case MINUS:
3627 if (mode == DImode)
3628 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3629 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3630 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3631 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3632 ? 0 : 8));
3634 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3635 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3636 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3637 && arm_const_double_rtx (XEXP (x, 1))))
3638 ? 0 : 8)
3639 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3640 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3641 && arm_const_double_rtx (XEXP (x, 0))))
3642 ? 0 : 8));
3644 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3645 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3646 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3647 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3648 || subcode == ASHIFTRT || subcode == LSHIFTRT
3649 || subcode == ROTATE || subcode == ROTATERT
3650 || (subcode == MULT
3651 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3652 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3653 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3654 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3655 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3656 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3657 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3658 return 1;
3659 /* Fall through */
3661 case PLUS:
3662 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3663 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3664 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3665 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3666 && arm_const_double_rtx (XEXP (x, 1))))
3667 ? 0 : 8));
3669 /* Fall through */
3670 case AND: case XOR: case IOR:
3671 extra_cost = 0;
3673 /* Normally the frame registers will be spilt into reg+const during
3674 reload, so it is a bad idea to combine them with other instructions,
3675 since then they might not be moved outside of loops. As a compromise
3676 we allow integration with ops that have a constant as their second
3677 operand. */
3678 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3679 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3680 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3681 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3682 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3683 extra_cost = 4;
3685 if (mode == DImode)
3686 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3687 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3688 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3689 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3690 ? 0 : 8));
3692 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3693 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3694 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3695 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3696 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3697 ? 0 : 4));
3699 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3700 return (1 + extra_cost
3701 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3702 || subcode == LSHIFTRT || subcode == ASHIFTRT
3703 || subcode == ROTATE || subcode == ROTATERT
3704 || (subcode == MULT
3705 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3706 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3707 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3708 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3709 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3710 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3711 ? 0 : 4));
3713 return 8;
3715 case MULT:
3716 /* This should have been handled by the CPU specific routines. */
3717 abort ();
3719 case TRUNCATE:
3720 if (arm_arch3m && mode == SImode
3721 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3722 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3723 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3724 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3725 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3726 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3727 return 8;
3728 return 99;
3730 case NEG:
3731 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3732 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3733 /* Fall through */
3734 case NOT:
3735 if (mode == DImode)
3736 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3738 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3740 case IF_THEN_ELSE:
3741 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3742 return 14;
3743 return 2;
3745 case COMPARE:
3746 return 1;
3748 case ABS:
3749 return 4 + (mode == DImode ? 4 : 0);
3751 case SIGN_EXTEND:
3752 if (GET_MODE (XEXP (x, 0)) == QImode)
3753 return (4 + (mode == DImode ? 4 : 0)
3754 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3755 /* Fall through */
3756 case ZERO_EXTEND:
3757 switch (GET_MODE (XEXP (x, 0)))
3759 case QImode:
3760 return (1 + (mode == DImode ? 4 : 0)
3761 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3763 case HImode:
3764 return (4 + (mode == DImode ? 4 : 0)
3765 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3767 case SImode:
3768 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3770 case V8QImode:
3771 case V4HImode:
3772 case V2SImode:
3773 case V4QImode:
3774 case V2HImode:
3775 return 1;
3777 default:
3778 break;
3780 abort ();
3782 case CONST_INT:
3783 if (const_ok_for_arm (INTVAL (x)))
3784 return outer == SET ? 2 : -1;
3785 else if (outer == AND
3786 && const_ok_for_arm (~INTVAL (x)))
3787 return -1;
3788 else if ((outer == COMPARE
3789 || outer == PLUS || outer == MINUS)
3790 && const_ok_for_arm (-INTVAL (x)))
3791 return -1;
3792 else
3793 return 5;
3795 case CONST:
3796 case LABEL_REF:
3797 case SYMBOL_REF:
3798 return 6;
3800 case CONST_DOUBLE:
3801 if (arm_const_double_rtx (x))
3802 return outer == SET ? 2 : -1;
3803 else if ((outer == COMPARE || outer == PLUS)
3804 && neg_const_double_rtx_ok_for_fpa (x))
3805 return -1;
3806 return 7;
3808 default:
3809 return 99;
3813 /* RTX costs for cores with a slow MUL implementation. */
3815 static bool
3816 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3818 enum machine_mode mode = GET_MODE (x);
3820 if (TARGET_THUMB)
3822 *total = thumb_rtx_costs (x, code, outer_code);
3823 return true;
3826 switch (code)
3828 case MULT:
3829 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3830 || mode == DImode)
3832 *total = 30;
3833 return true;
3836 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3838 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3839 & (unsigned HOST_WIDE_INT) 0xffffffff);
3840 int cost, const_ok = const_ok_for_arm (i);
3841 int j, booth_unit_size;
3843 /* Tune as appropriate. */
3844 cost = const_ok ? 4 : 8;
3845 booth_unit_size = 2;
3846 for (j = 0; i && j < 32; j += booth_unit_size)
3848 i >>= booth_unit_size;
3849 cost += 2;
3852 *total = cost;
3853 return true;
3856 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3857 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3858 return true;
3860 default:
3861 *total = arm_rtx_costs_1 (x, code, outer_code);
3862 return true;
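/* Worked example (illustrative): multiplying by 100 on a slow-MUL
   core starts from cost 4 (0x64 is a valid immediate) and adds 2 for
   each of the four 2-bit Booth steps needed to consume 0x64, giving
   *total = 12.  */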
3867 /* RTX cost for cores with a fast multiply unit (M variants). */
3869 static bool
3870 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3872 enum machine_mode mode = GET_MODE (x);
3874 if (TARGET_THUMB)
3876 *total = thumb_rtx_costs (x, code, outer_code);
3877 return true;
3880 switch (code)
3882 case MULT:
3883 /* There is no point basing this on the tuning, since it is always the
3884 fast variant if it exists at all. */
3885 if (mode == DImode
3886 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3887 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3888 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3890 *total = 8;
3891 return true;
3895 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3896 || mode == DImode)
3898 *total = 30;
3899 return true;
3902 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3904 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3905 & (unsigned HOST_WIDE_INT) 0xffffffff);
3906 int cost, const_ok = const_ok_for_arm (i);
3907 int j, booth_unit_size;
3909 /* Tune as appropriate. */
3910 cost = const_ok ? 4 : 8;
3911 booth_unit_size = 8;
3912 for (j = 0; i && j < 32; j += booth_unit_size)
3914 i >>= booth_unit_size;
3915 cost += 2;
3918 *total = cost;
3919 return true;
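/* For comparison with the slow-multiply variant: here BOOTH_UNIT_SIZE
   is 8, so the same 0x55 multiplier is consumed in a single step and
   costs 4 + 2 = 6, and even a full 32-bit constant takes at most four
   steps, for a worst case of 8 + 4 * 2 = 16.  */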
3922 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3923 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3924 return true;
3926 default:
3927 *total = arm_rtx_costs_1 (x, code, outer_code);
3928 return true;
3933 /* RTX cost for XScale CPUs. */
3935 static bool
3936 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
3938 enum machine_mode mode = GET_MODE (x);
3940 if (TARGET_THUMB)
3942 *total = thumb_rtx_costs (x, code, outer_code);
3943 return true;
3946 switch (code)
3948 case MULT:
3949 /* There is no point basing this on the tuning, since it is always the
3950 fast variant if it exists at all. */
3951 if (mode == DImode
3952 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3953 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3954 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3956 *total = 8;
3957 return true;
3961 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3962 || mode == DImode)
3964 *total = 30;
3965 return true;
3968 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3970 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3971 & (unsigned HOST_WIDE_INT) 0xffffffff);
3972 int cost, const_ok = const_ok_for_arm (i);
3973 unsigned HOST_WIDE_INT masked_const;
3975 /* The cost will be related to two insns.
3976 First a load of the constant (MOV or LDR), then a multiply. */
3977 cost = 2;
3978 if (! const_ok)
3979 cost += 1; /* LDR is probably more expensive because
3980 of longer result latency. */
3981 masked_const = i & 0xffff8000;
3982 if (masked_const != 0 && masked_const != 0xffff8000)
3984 masked_const = i & 0xf8000000;
3985 if (masked_const == 0 || masked_const == 0xf8000000)
3986 cost += 1;
3987 else
3988 cost += 2;
3990 *total = cost;
3991 return true;
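/* For illustration: i == 100 sits entirely below bit 15, so the masked
   tests above add nothing and the multiply costs just the 2 base insns;
   i == 0x12345678 is not a valid immediate (cost 3 after the LDR
   adjustment) and its top five bits are neither all clear nor all set,
   adding 2 more for a total of 5.  */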
3994 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3995 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3996 return true;
3998 default:
3999 *total = arm_rtx_costs_1 (x, code, outer_code);
4000 return true;
4005 /* RTX costs for 9e (and later) cores. */
4007 static bool
4008 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4010 enum machine_mode mode = GET_MODE (x);
4011 int nonreg_cost;
4012 int cost;
4014 if (TARGET_THUMB)
4016 switch (code)
4018 case MULT:
4019 *total = COSTS_N_INSNS (3);
4020 return true;
4022 default:
4023 *total = thumb_rtx_costs (x, code, outer_code);
4024 return true;
4028 switch (code)
4030 case MULT:
4031 /* There is no point basing this on the tuning, since it is always the
4032 fast variant if it exists at all. */
4033 if (mode == DImode
4034 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4035 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4036 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4038 *total = 3;
4039 return true;
4043 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4045 *total = 30;
4046 return true;
4048 if (mode == DImode)
4050 cost = 7;
4051 nonreg_cost = 8;
4053 else
4055 cost = 2;
4056 nonreg_cost = 4;
4060 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4061 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4062 return true;
4064 default:
4065 *total = arm_rtx_costs_1 (x, code, outer_code);
4066 return true;
4069 /* All address computations that can be done are free, but rtx cost returns
4070 the same for practically all of them. So we weight the different types
4071 of address here in the order (most pref first):
4072 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4073 static inline int
4074 arm_arm_address_cost (rtx x)
4076 enum rtx_code c = GET_CODE (x);
4078 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4079 return 0;
4080 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4081 return 10;
4083 if (c == PLUS || c == MINUS)
4085 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4086 return 2;
4088 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4089 return 3;
4091 return 4;
4094 return 6;
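/* For illustration: (post_inc r3) costs 0, (plus r3 (mult r2 4)) costs
   3, (plus r3 (const_int 8)) costs 4, a bare (reg r3) costs 6 and a
   (symbol_ref ...) costs 10, so auto-increment addressing is preferred
   and direct symbolic addresses are penalized the most.  */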
4097 static inline int
4098 arm_thumb_address_cost (rtx x)
4100 enum rtx_code c = GET_CODE (x);
4102 if (c == REG)
4103 return 1;
4104 if (c == PLUS
4105 && GET_CODE (XEXP (x, 0)) == REG
4106 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4107 return 1;
4109 return 2;
4112 static int
4113 arm_address_cost (rtx x)
4115 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4118 static int
4119 arm_use_dfa_pipeline_interface (void)
4121 return true;
4124 static int
4125 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4127 rtx i_pat, d_pat;
4129 /* Some true dependencies can have a higher cost depending
4130 on precisely how certain input operands are used. */
4131 if (arm_tune_xscale
4132 && REG_NOTE_KIND (link) == 0
4133 && recog_memoized (insn) >= 0
4134 && recog_memoized (dep) >= 0)
4136 int shift_opnum = get_attr_shift (insn);
4137 enum attr_type attr_type = get_attr_type (dep);
4139 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4140 operand for INSN. If we have a shifted input operand and the
4141 instruction we depend on is another ALU instruction, then we may
4142 have to account for an additional stall. */
4143 if (shift_opnum != 0
4144 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4146 rtx shifted_operand;
4147 int opno;
4149 /* Get the shifted operand. */
4150 extract_insn (insn);
4151 shifted_operand = recog_data.operand[shift_opnum];
4153 /* Iterate over all the operands in DEP. If we write an operand
4154 that overlaps with SHIFTED_OPERAND, then we have to increase the
4155 cost of this dependency. */
4156 extract_insn (dep);
4157 preprocess_constraints ();
4158 for (opno = 0; opno < recog_data.n_operands; opno++)
4160 /* We can ignore strict inputs. */
4161 if (recog_data.operand_type[opno] == OP_IN)
4162 continue;
4164 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4165 shifted_operand))
4166 return 2;
4171 /* XXX This is not strictly true for the FPA. */
4172 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4173 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4174 return 0;
4176 /* Call insns don't incur a stall, even if they follow a load. */
4177 if (REG_NOTE_KIND (link) == 0
4178 && GET_CODE (insn) == CALL_INSN)
4179 return 1;
4181 if ((i_pat = single_set (insn)) != NULL
4182 && GET_CODE (SET_SRC (i_pat)) == MEM
4183 && (d_pat = single_set (dep)) != NULL
4184 && GET_CODE (SET_DEST (d_pat)) == MEM)
4186 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4187 /* This is a load after a store; there is no conflict if the load reads
4188 from a cached area. Assume that loads from the stack, and from the
4189 constant pool are cached, and that others will miss. This is a
4190 hack. */
4192 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4193 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4194 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4195 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4196 return 1;
4199 return cost;
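/* A sketch of the XScale shift rule above: if DEP is
   "add r1, r2, r3, lsl #1" (an alu_shift insn) and INSN uses r1 as a
   shifted operand, as in "add r0, r4, r1, lsl #2", the write to r1
   overlaps the shifted input and the dependency is charged a cost of 2
   rather than the default.  */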
4202 static int fp_consts_inited = 0;
4204 /* Only zero is valid for VFP; the remaining values are valid only for FPA. */
4205 static const char * const strings_fp[8] =
4207 "0", "1", "2", "3",
4208 "4", "5", "0.5", "10"
4211 static REAL_VALUE_TYPE values_fp[8];
4213 static void
4214 init_fp_table (void)
4216 int i;
4217 REAL_VALUE_TYPE r;
4219 if (TARGET_VFP)
4220 fp_consts_inited = 1;
4221 else
4222 fp_consts_inited = 8;
4224 for (i = 0; i < fp_consts_inited; i++)
4226 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4227 values_fp[i] = r;
4231 /* Return TRUE if rtx X is a valid immediate FP constant. */
4233 arm_const_double_rtx (rtx x)
4235 REAL_VALUE_TYPE r;
4236 int i;
4238 if (!fp_consts_inited)
4239 init_fp_table ();
4241 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4242 if (REAL_VALUE_MINUS_ZERO (r))
4243 return 0;
4245 for (i = 0; i < fp_consts_inited; i++)
4246 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4247 return 1;
4249 return 0;
4252 /* Return TRUE if rtx X, when negated, is a valid immediate FPA constant. */
4254 neg_const_double_rtx_ok_for_fpa (rtx x)
4256 REAL_VALUE_TYPE r;
4257 int i;
4259 if (!fp_consts_inited)
4260 init_fp_table ();
4262 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4263 r = REAL_VALUE_NEGATE (r);
4264 if (REAL_VALUE_MINUS_ZERO (r))
4265 return 0;
4267 for (i = 0; i < 8; i++)
4268 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4269 return 1;
4271 return 0;
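/* Usage note: these predicates let an FPA operation keep, say, 10.0 as
   an immediate operand, and arm_float_add_operand below also accepts a
   constant such as -5.0, since negating it yields the table entry 5.
   With VFP only 0.0 is ever accepted.  */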
4274 /* Predicates for `match_operand' and `match_operator'. */
4276 /* s_register_operand is the same as register_operand, but it doesn't accept
4277 (SUBREG (MEM)...).
4279 This function exists because at the time it was put in it led to better
4280 code. SUBREG(MEM) always needs a reload in the places where
4281 s_register_operand is used, and this seemed to lead to excessive
4282 reloading. */
4284 s_register_operand (rtx op, enum machine_mode mode)
4286 if (GET_MODE (op) != mode && mode != VOIDmode)
4287 return 0;
4289 if (GET_CODE (op) == SUBREG)
4290 op = SUBREG_REG (op);
4292 /* We don't consider registers whose class is NO_REGS
4293 to be a register operand. */
4294 /* XXX might have to check for lo regs only for thumb ??? */
4295 return (GET_CODE (op) == REG
4296 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4297 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4300 /* A hard register operand (even before reload). */
4302 arm_hard_register_operand (rtx op, enum machine_mode mode)
4304 if (GET_MODE (op) != mode && mode != VOIDmode)
4305 return 0;
4307 return (GET_CODE (op) == REG
4308 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4311 /* An ARM register operand. */
4313 arm_general_register_operand (rtx op, enum machine_mode mode)
4315 if (GET_MODE (op) != mode && mode != VOIDmode)
4316 return 0;
4318 if (GET_CODE (op) == SUBREG)
4319 op = SUBREG_REG (op);
4321 return (GET_CODE (op) == REG
4322 && (REGNO (op) <= LAST_ARM_REGNUM
4323 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4326 /* Only accept reg, subreg(reg), const_int. */
4328 reg_or_int_operand (rtx op, enum machine_mode mode)
4330 if (GET_CODE (op) == CONST_INT)
4331 return 1;
4333 if (GET_MODE (op) != mode && mode != VOIDmode)
4334 return 0;
4336 if (GET_CODE (op) == SUBREG)
4337 op = SUBREG_REG (op);
4339 /* We don't consider registers whose class is NO_REGS
4340 to be a register operand. */
4341 return (GET_CODE (op) == REG
4342 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4343 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4346 /* Return 1 if OP is an item in memory, given that we are in reload. */
4348 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4350 int regno = true_regnum (op);
4352 return (!CONSTANT_P (op)
4353 && (regno == -1
4354 || (GET_CODE (op) == REG
4355 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4358 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4360 arm_rhs_operand (rtx op, enum machine_mode mode)
4362 return (s_register_operand (op, mode)
4363 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4366 /* Return TRUE for valid operands for the
4367 rhs of an ARM instruction, or a load. */
4369 arm_rhsm_operand (rtx op, enum machine_mode mode)
4371 return (s_register_operand (op, mode)
4372 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4373 || memory_operand (op, mode));
4376 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4377 constant that is valid when negated. */
4379 arm_add_operand (rtx op, enum machine_mode mode)
4381 if (TARGET_THUMB)
4382 return thumb_cmp_operand (op, mode);
4384 return (s_register_operand (op, mode)
4385 || (GET_CODE (op) == CONST_INT
4386 && (const_ok_for_arm (INTVAL (op))
4387 || const_ok_for_arm (-INTVAL (op)))));
4390 /* Return TRUE for valid ARM constants (or when valid if negated). */
4392 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4394 return (GET_CODE (op) == CONST_INT
4395 && (const_ok_for_arm (INTVAL (op))
4396 || const_ok_for_arm (-INTVAL (op))));
4400 arm_not_operand (rtx op, enum machine_mode mode)
4402 return (s_register_operand (op, mode)
4403 || (GET_CODE (op) == CONST_INT
4404 && (const_ok_for_arm (INTVAL (op))
4405 || const_ok_for_arm (~INTVAL (op)))));
4408 /* Return TRUE if the operand is a memory reference which contains an
4409 offsettable address. */
4411 offsettable_memory_operand (rtx op, enum machine_mode mode)
4413 if (mode == VOIDmode)
4414 mode = GET_MODE (op);
4416 return (mode == GET_MODE (op)
4417 && GET_CODE (op) == MEM
4418 && offsettable_address_p (reload_completed | reload_in_progress,
4419 mode, XEXP (op, 0)));
4422 /* Return TRUE if the operand is a memory reference which is, or can be
4423 made word aligned by adjusting the offset. */
4425 alignable_memory_operand (rtx op, enum machine_mode mode)
4427 rtx reg;
4429 if (mode == VOIDmode)
4430 mode = GET_MODE (op);
4432 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4433 return 0;
4435 op = XEXP (op, 0);
4437 return ((GET_CODE (reg = op) == REG
4438 || (GET_CODE (op) == SUBREG
4439 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4440 || (GET_CODE (op) == PLUS
4441 && GET_CODE (XEXP (op, 1)) == CONST_INT
4442 && (GET_CODE (reg = XEXP (op, 0)) == REG
4443 || (GET_CODE (XEXP (op, 0)) == SUBREG
4444 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4445 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4448 /* Similar to s_register_operand, but does not allow hard integer
4449 registers. */
4451 f_register_operand (rtx op, enum machine_mode mode)
4453 if (GET_MODE (op) != mode && mode != VOIDmode)
4454 return 0;
4456 if (GET_CODE (op) == SUBREG)
4457 op = SUBREG_REG (op);
4459 /* We don't consider registers whose class is NO_REGS
4460 to be a register operand. */
4461 return (GET_CODE (op) == REG
4462 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4463 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4466 /* Return TRUE for valid operands for the rhs of a floating point insn.
4467 Allows regs or certain consts on FPA, just regs for everything else. */
4469 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4471 if (s_register_operand (op, mode))
4472 return TRUE;
4474 if (GET_MODE (op) != mode && mode != VOIDmode)
4475 return FALSE;
4477 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4478 return arm_const_double_rtx (op);
4480 return FALSE;
4484 arm_float_add_operand (rtx op, enum machine_mode mode)
4486 if (s_register_operand (op, mode))
4487 return TRUE;
4489 if (GET_MODE (op) != mode && mode != VOIDmode)
4490 return FALSE;
4492 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4493 return (arm_const_double_rtx (op)
4494 || neg_const_double_rtx_ok_for_fpa (op));
4496 return FALSE;
4500 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4501 Depends on which FPU we are targeting. */
4504 arm_float_compare_operand (rtx op, enum machine_mode mode)
4506 if (TARGET_VFP)
4507 return vfp_compare_operand (op, mode);
4508 else
4509 return arm_float_rhs_operand (op, mode);
4513 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4515 cirrus_memory_offset (rtx op)
4517 /* Reject eliminable registers. */
4518 if (! (reload_in_progress || reload_completed)
4519 && ( reg_mentioned_p (frame_pointer_rtx, op)
4520 || reg_mentioned_p (arg_pointer_rtx, op)
4521 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4522 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4523 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4524 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4525 return 0;
4527 if (GET_CODE (op) == MEM)
4529 rtx ind;
4531 ind = XEXP (op, 0);
4533 /* Match: (mem (reg)). */
4534 if (GET_CODE (ind) == REG)
4535 return 1;
4537 /* Match:
4538 (mem (plus (reg)
4539 (const))). */
4540 if (GET_CODE (ind) == PLUS
4541 && GET_CODE (XEXP (ind, 0)) == REG
4542 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4543 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4544 return 1;
4547 return 0;
4551 arm_extendqisi_mem_op (rtx op, enum machine_mode mode)
4553 if (!memory_operand (op, mode))
4554 return 0;
4556 return arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, 0);
4559 /* Return nonzero if OP is a Cirrus or general register. */
4561 cirrus_register_operand (rtx op, enum machine_mode mode)
4563 if (GET_MODE (op) != mode && mode != VOIDmode)
4564 return FALSE;
4566 if (GET_CODE (op) == SUBREG)
4567 op = SUBREG_REG (op);
4569 return (GET_CODE (op) == REG
4570 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4571 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4574 /* Return nonzero if OP is a Cirrus FP register. */
4576 cirrus_fp_register (rtx op, enum machine_mode mode)
4578 if (GET_MODE (op) != mode && mode != VOIDmode)
4579 return FALSE;
4581 if (GET_CODE (op) == SUBREG)
4582 op = SUBREG_REG (op);
4584 return (GET_CODE (op) == REG
4585 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4586 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4589 /* Return nonzero if OP is a 6-bit constant (0..63). */
4591 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4593 return (GET_CODE (op) == CONST_INT
4594 && INTVAL (op) >= 0
4595 && INTVAL (op) < 64);
4599 /* Return TRUE if OP is a valid VFP memory address pattern. */
4600 /* Copied from cirrus_memory_offset but with restricted offset range. */
4603 vfp_mem_operand (rtx op)
4605 /* Reject eliminable registers. */
4607 if (! (reload_in_progress || reload_completed)
4608 && ( reg_mentioned_p (frame_pointer_rtx, op)
4609 || reg_mentioned_p (arg_pointer_rtx, op)
4610 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4611 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4612 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4613 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4614 return FALSE;
4616 /* Constants are converted into offsets from labels. */
4617 if (GET_CODE (op) == MEM)
4619 rtx ind;
4621 ind = XEXP (op, 0);
4623 if (reload_completed
4624 && (GET_CODE (ind) == LABEL_REF
4625 || (GET_CODE (ind) == CONST
4626 && GET_CODE (XEXP (ind, 0)) == PLUS
4627 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4628 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4629 return TRUE;
4631 /* Match: (mem (reg)). */
4632 if (GET_CODE (ind) == REG)
4633 return arm_address_register_rtx_p (ind, 0);
4635 /* Match:
4636 (mem (plus (reg)
4637 (const))). */
4638 if (GET_CODE (ind) == PLUS
4639 && GET_CODE (XEXP (ind, 0)) == REG
4640 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4641 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4642 && INTVAL (XEXP (ind, 1)) > -1024
4643 && INTVAL (XEXP (ind, 1)) < 1024)
4644 return TRUE;
4647 return FALSE;
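/* For illustration: after reload, (mem (reg r4)) and
   (mem (plus (reg r4) (const_int 1020))) are accepted, whereas
   (mem (plus (reg r4) (const_int 1024))) falls outside the +/-1023
   offset window and needs a secondary reload (see
   vfp_secondary_reload_class below).  */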
4651 /* Return TRUE if OP is a REG or constant zero. */
4653 vfp_compare_operand (rtx op, enum machine_mode mode)
4655 if (s_register_operand (op, mode))
4656 return TRUE;
4658 return (GET_CODE (op) == CONST_DOUBLE
4659 && arm_const_double_rtx (op));
4663 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4664 VFP registers. Otherwise return NO_REGS. */
4666 enum reg_class
4667 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4669 if (vfp_mem_operand (x) || s_register_operand (x, mode))
4670 return NO_REGS;
4672 return GENERAL_REGS;
4676 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4677 Used by the Cirrus Maverick code, which has to work around
4678 a hardware bug triggered by such instructions. */
4679 static bool
4680 arm_memory_load_p (rtx insn)
4682 rtx body, lhs, rhs;
4684 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4685 return false;
4687 body = PATTERN (insn);
4689 if (GET_CODE (body) != SET)
4690 return false;
4692 lhs = XEXP (body, 0);
4693 rhs = XEXP (body, 1);
4695 lhs = REG_OR_SUBREG_RTX (lhs);
4697 /* If the destination is not a general purpose
4698 register we do not have to worry. */
4699 if (GET_CODE (lhs) != REG
4700 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4701 return false;
4703 /* As well as loads from memory we also have to react
4704 to loads of invalid constants which will be turned
4705 into loads from the minipool. */
4706 return (GET_CODE (rhs) == MEM
4707 || GET_CODE (rhs) == SYMBOL_REF
4708 || note_invalid_constants (insn, -1, false));
4711 /* Return TRUE if INSN is a Cirrus instruction. */
4712 static bool
4713 arm_cirrus_insn_p (rtx insn)
4715 enum attr_cirrus attr;
4717 /* get_attr aborts on USE and CLOBBER. */
4718 if (!insn
4719 || GET_CODE (insn) != INSN
4720 || GET_CODE (PATTERN (insn)) == USE
4721 || GET_CODE (PATTERN (insn)) == CLOBBER)
4722 return 0;
4724 attr = get_attr_cirrus (insn);
4726 return attr != CIRRUS_NOT;
4729 /* Cirrus reorg for invalid instruction combinations. */
4730 static void
4731 cirrus_reorg (rtx first)
4733 enum attr_cirrus attr;
4734 rtx body = PATTERN (first);
4735 rtx t;
4736 int nops;
4738 /* Any branch must be followed by 2 non-Cirrus instructions. */
4739 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4741 nops = 0;
4742 t = next_nonnote_insn (first);
4744 if (arm_cirrus_insn_p (t))
4745 ++ nops;
4747 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4748 ++ nops;
4750 while (nops --)
4751 emit_insn_after (gen_nop (), first);
4753 return;
4756 /* (float (blah)) is in parallel with a clobber. */
4757 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4758 body = XVECEXP (body, 0, 0);
4760 if (GET_CODE (body) == SET)
4762 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4764 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4765 be followed by a non-Cirrus insn. */
4766 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4768 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4769 emit_insn_after (gen_nop (), first);
4771 return;
4773 else if (arm_memory_load_p (first))
4775 unsigned int arm_regno;
4777 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4778 ldr/cfmv64hr combination where the Rd field is the same
4779 in both instructions must be split with a non Cirrus
4780 insn. Example:
4782 ldr r0, blah
4784 cfmvsr mvf0, r0. */
4786 /* Get Arm register number for ldr insn. */
4787 if (GET_CODE (lhs) == REG)
4788 arm_regno = REGNO (lhs);
4789 else if (GET_CODE (rhs) == REG)
4790 arm_regno = REGNO (rhs);
4791 else
4792 abort ();
4794 /* Next insn. */
4795 first = next_nonnote_insn (first);
4797 if (! arm_cirrus_insn_p (first))
4798 return;
4800 body = PATTERN (first);
4802 /* (float (blah)) is in parallel with a clobber. */
4803 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4804 body = XVECEXP (body, 0, 0);
4806 if (GET_CODE (body) == FLOAT)
4807 body = XEXP (body, 0);
4809 if (get_attr_cirrus (first) == CIRRUS_MOVE
4810 && GET_CODE (XEXP (body, 1)) == REG
4811 && arm_regno == REGNO (XEXP (body, 1)))
4812 emit_insn_after (gen_nop (), first);
4814 return;
4818 /* get_attr aborts on USE and CLOBBER. */
4819 if (!first
4820 || GET_CODE (first) != INSN
4821 || GET_CODE (PATTERN (first)) == USE
4822 || GET_CODE (PATTERN (first)) == CLOBBER)
4823 return;
4825 attr = get_attr_cirrus (first);
4827 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4828 must be followed by a non-coprocessor instruction. */
4829 if (attr == CIRRUS_COMPARE)
4831 nops = 0;
4833 t = next_nonnote_insn (first);
4835 if (arm_cirrus_insn_p (t))
4836 ++ nops;
4838 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4839 ++ nops;
4841 while (nops --)
4842 emit_insn_after (gen_nop (), first);
4844 return;
4848 /* Return nonzero if OP is a constant power of two. */
4850 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4852 if (GET_CODE (op) == CONST_INT)
4854 HOST_WIDE_INT value = INTVAL (op);
4856 return value != 0 && (value & (value - 1)) == 0;
4859 return FALSE;
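/* VALUE & (VALUE - 1) clears the lowest set bit, so the test above is
   zero exactly when a single bit is set: 8 & 7 == 0 (a power of two)
   while 12 & 11 == 8 (not one).  */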
4862 /* Return TRUE for a valid operand of a DImode operation.
4863 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4864 Note that this disallows MEM(REG+REG), but allows
4865 MEM(PRE/POST_INC/DEC(REG)). */
4867 di_operand (rtx op, enum machine_mode mode)
4869 if (s_register_operand (op, mode))
4870 return TRUE;
4872 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4873 return FALSE;
4875 if (GET_CODE (op) == SUBREG)
4876 op = SUBREG_REG (op);
4878 switch (GET_CODE (op))
4880 case CONST_DOUBLE:
4881 case CONST_INT:
4882 return TRUE;
4884 case MEM:
4885 return memory_address_p (DImode, XEXP (op, 0));
4887 default:
4888 return FALSE;
4892 /* Like di_operand, but don't accept constants. */
4894 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4896 if (s_register_operand (op, mode))
4897 return TRUE;
4899 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4900 return FALSE;
4902 if (GET_CODE (op) == SUBREG)
4903 op = SUBREG_REG (op);
4905 if (GET_CODE (op) == MEM)
4906 return memory_address_p (DImode, XEXP (op, 0));
4908 return FALSE;
4911 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
4912 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4913 Note that this disallows MEM(REG+REG), but allows
4914 MEM(PRE/POST_INC/DEC(REG)). */
4916 soft_df_operand (rtx op, enum machine_mode mode)
4918 if (s_register_operand (op, mode))
4919 return TRUE;
4921 if (mode != VOIDmode && GET_MODE (op) != mode)
4922 return FALSE;
4924 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4925 return FALSE;
4927 if (GET_CODE (op) == SUBREG)
4928 op = SUBREG_REG (op);
4930 switch (GET_CODE (op))
4932 case CONST_DOUBLE:
4933 return TRUE;
4935 case MEM:
4936 return memory_address_p (DFmode, XEXP (op, 0));
4938 default:
4939 return FALSE;
4943 /* Like soft_df_operand, but don't accept constants. */
4945 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4947 if (s_register_operand (op, mode))
4948 return TRUE;
4950 if (mode != VOIDmode && GET_MODE (op) != mode)
4951 return FALSE;
4953 if (GET_CODE (op) == SUBREG)
4954 op = SUBREG_REG (op);
4956 if (GET_CODE (op) == MEM)
4957 return memory_address_p (DFmode, XEXP (op, 0));
4958 return FALSE;
4961 /* Return TRUE for valid index operands. */
4963 index_operand (rtx op, enum machine_mode mode)
4965 return (s_register_operand (op, mode)
4966 || (immediate_operand (op, mode)
4967 && (GET_CODE (op) != CONST_INT
4968 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4971 /* Return TRUE for valid shifts by a constant. This also accepts any
4972 power of two on the (somewhat overly relaxed) assumption that the
4973 shift operator in this case was a mult. */
4975 const_shift_operand (rtx op, enum machine_mode mode)
4977 return (power_of_two_operand (op, mode)
4978 || (immediate_operand (op, mode)
4979 && (GET_CODE (op) != CONST_INT
4980 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4983 /* Return TRUE for arithmetic operators which can be combined with a multiply
4984 (shift). */
4986 shiftable_operator (rtx x, enum machine_mode mode)
4988 enum rtx_code code;
4990 if (GET_MODE (x) != mode)
4991 return FALSE;
4993 code = GET_CODE (x);
4995 return (code == PLUS || code == MINUS
4996 || code == IOR || code == XOR || code == AND);
4999 /* Return TRUE for binary logical operators. */
5001 logical_binary_operator (rtx x, enum machine_mode mode)
5003 enum rtx_code code;
5005 if (GET_MODE (x) != mode)
5006 return FALSE;
5008 code = GET_CODE (x);
5010 return (code == IOR || code == XOR || code == AND);
5013 /* Return TRUE for shift operators. */
5015 shift_operator (rtx x, enum machine_mode mode)
5017 enum rtx_code code;
5019 if (GET_MODE (x) != mode)
5020 return FALSE;
5022 code = GET_CODE (x);
5024 if (code == MULT)
5025 return power_of_two_operand (XEXP (x, 1), mode);
5027 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
5028 || code == ROTATERT);
5031 /* Return TRUE if x is EQ or NE. */
5033 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
5035 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
5038 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
5040 arm_comparison_operator (rtx x, enum machine_mode mode)
5042 return (comparison_operator (x, mode)
5043 && GET_CODE (x) != LTGT
5044 && GET_CODE (x) != UNEQ);
5047 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
5049 minmax_operator (rtx x, enum machine_mode mode)
5051 enum rtx_code code = GET_CODE (x);
5053 if (GET_MODE (x) != mode)
5054 return FALSE;
5056 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
5059 /* Return TRUE if this is the condition code register; if we aren't given
5060 a mode, accept any class CCmode register. */
5062 cc_register (rtx x, enum machine_mode mode)
5064 if (mode == VOIDmode)
5066 mode = GET_MODE (x);
5068 if (GET_MODE_CLASS (mode) != MODE_CC)
5069 return FALSE;
5072 if ( GET_MODE (x) == mode
5073 && GET_CODE (x) == REG
5074 && REGNO (x) == CC_REGNUM)
5075 return TRUE;
5077 return FALSE;
5080 /* Return TRUE if this is the condition code register; if we aren't given
5081 a mode, accept any class CCmode register which indicates a dominance
5082 expression. */
5084 dominant_cc_register (rtx x, enum machine_mode mode)
5086 if (mode == VOIDmode)
5088 mode = GET_MODE (x);
5090 if (GET_MODE_CLASS (mode) != MODE_CC)
5091 return FALSE;
5094 if (mode != CC_DNEmode && mode != CC_DEQmode
5095 && mode != CC_DLEmode && mode != CC_DLTmode
5096 && mode != CC_DGEmode && mode != CC_DGTmode
5097 && mode != CC_DLEUmode && mode != CC_DLTUmode
5098 && mode != CC_DGEUmode && mode != CC_DGTUmode)
5099 return FALSE;
5101 return cc_register (x, mode);
5104 /* Return TRUE if X references a SYMBOL_REF. */
5106 symbol_mentioned_p (rtx x)
5108 const char * fmt;
5109 int i;
5111 if (GET_CODE (x) == SYMBOL_REF)
5112 return 1;
5114 fmt = GET_RTX_FORMAT (GET_CODE (x));
5116 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5118 if (fmt[i] == 'E')
5120 int j;
5122 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5123 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5124 return 1;
5126 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5127 return 1;
5130 return 0;
5133 /* Return TRUE if X references a LABEL_REF. */
5135 label_mentioned_p (rtx x)
5137 const char * fmt;
5138 int i;
5140 if (GET_CODE (x) == LABEL_REF)
5141 return 1;
5143 fmt = GET_RTX_FORMAT (GET_CODE (x));
5144 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5146 if (fmt[i] == 'E')
5148 int j;
5150 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5151 if (label_mentioned_p (XVECEXP (x, i, j)))
5152 return 1;
5154 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5155 return 1;
5158 return 0;
5161 enum rtx_code
5162 minmax_code (rtx x)
5164 enum rtx_code code = GET_CODE (x);
5166 if (code == SMAX)
5167 return GE;
5168 else if (code == SMIN)
5169 return LE;
5170 else if (code == UMIN)
5171 return LEU;
5172 else if (code == UMAX)
5173 return GEU;
5175 abort ();
5178 /* Return 1 if memory locations are adjacent. */
5180 adjacent_mem_locations (rtx a, rtx b)
5182 if ((GET_CODE (XEXP (a, 0)) == REG
5183 || (GET_CODE (XEXP (a, 0)) == PLUS
5184 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5185 && (GET_CODE (XEXP (b, 0)) == REG
5186 || (GET_CODE (XEXP (b, 0)) == PLUS
5187 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5189 int val0 = 0, val1 = 0;
5190 int reg0, reg1;
5192 if (GET_CODE (XEXP (a, 0)) == PLUS)
5194 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5195 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5197 else
5198 reg0 = REGNO (XEXP (a, 0));
5200 if (GET_CODE (XEXP (b, 0)) == PLUS)
5202 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5203 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5205 else
5206 reg1 = REGNO (XEXP (b, 0));
5208 /* Don't accept any offset that will require multiple
5209 instructions to handle, since this would cause the
5210 arith_adjacentmem pattern to output an overlong sequence. */
5211 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5212 return 0;
5214 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5216 return 0;
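/* For illustration: [r4] and [r4, #4] are adjacent (in either order),
   but [r4] and [r4, #8] are not, and neither are [r4] and [r5, #4],
   since the base registers differ.  */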
5219 /* Return 1 if OP is a load multiple operation. It is known to be
5220 parallel and the first section will be tested. */
5222 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5224 HOST_WIDE_INT count = XVECLEN (op, 0);
5225 int dest_regno;
5226 rtx src_addr;
5227 HOST_WIDE_INT i = 1, base = 0;
5228 rtx elt;
5230 if (count <= 1
5231 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5232 return 0;
5234 /* Check to see if this might be a write-back. */
5235 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5237 i++;
5238 base = 1;
5240 /* Now check it more carefully. */
5241 if (GET_CODE (SET_DEST (elt)) != REG
5242 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5243 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5244 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5245 return 0;
5248 /* Perform a quick check so we don't blow up below. */
5249 if (count <= i
5250 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5251 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5252 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5253 return 0;
5255 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5256 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5258 for (; i < count; i++)
5260 elt = XVECEXP (op, 0, i);
5262 if (GET_CODE (elt) != SET
5263 || GET_CODE (SET_DEST (elt)) != REG
5264 || GET_MODE (SET_DEST (elt)) != SImode
5265 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5266 || GET_CODE (SET_SRC (elt)) != MEM
5267 || GET_MODE (SET_SRC (elt)) != SImode
5268 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5269 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5270 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5271 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5272 return 0;
5275 return 1;
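/* For illustration, a write-back load such as "ldmia r4!, {r0, r1}" is
   expected here as
     (parallel [(set (reg r4) (plus (reg r4) (const_int 8)))
                (set (reg r0) (mem (reg r4)))
                (set (reg r1) (mem (plus (reg r4) (const_int 4))))])
   so BASE becomes 1 and the destination registers must ascend while the
   offsets step by 4.  */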
5278 /* Return 1 if OP is a store multiple operation. It is known to be
5279 parallel and the first section will be tested. */
5281 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5283 HOST_WIDE_INT count = XVECLEN (op, 0);
5284 int src_regno;
5285 rtx dest_addr;
5286 HOST_WIDE_INT i = 1, base = 0;
5287 rtx elt;
5289 if (count <= 1
5290 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5291 return 0;
5293 /* Check to see if this might be a write-back. */
5294 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5296 i++;
5297 base = 1;
5299 /* Now check it more carefully. */
5300 if (GET_CODE (SET_DEST (elt)) != REG
5301 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5302 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5303 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5304 return 0;
5307 /* Perform a quick check so we don't blow up below. */
5308 if (count <= i
5309 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5310 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5311 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5312 return 0;
5314 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5315 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5317 for (; i < count; i++)
5319 elt = XVECEXP (op, 0, i);
5321 if (GET_CODE (elt) != SET
5322 || GET_CODE (SET_SRC (elt)) != REG
5323 || GET_MODE (SET_SRC (elt)) != SImode
5324 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5325 || GET_CODE (SET_DEST (elt)) != MEM
5326 || GET_MODE (SET_DEST (elt)) != SImode
5327 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5328 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5329 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5330 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5331 return 0;
5334 return 1;
5338 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5339 HOST_WIDE_INT *load_offset)
5341 int unsorted_regs[4];
5342 HOST_WIDE_INT unsorted_offsets[4];
5343 int order[4];
5344 int base_reg = -1;
5345 int i;
5347 /* Can only handle 2, 3, or 4 insns at present,
5348 though could be easily extended if required. */
5349 if (nops < 2 || nops > 4)
5350 abort ();
5352 /* Loop over the operands and check that the memory references are
5353 suitable (i.e. immediate offsets from the same base register). At
5354 the same time, extract the target register, and the memory
5355 offsets. */
5356 for (i = 0; i < nops; i++)
5358 rtx reg;
5359 rtx offset;
5361 /* Convert a subreg of a mem into the mem itself. */
5362 if (GET_CODE (operands[nops + i]) == SUBREG)
5363 operands[nops + i] = alter_subreg (operands + (nops + i));
5365 if (GET_CODE (operands[nops + i]) != MEM)
5366 abort ();
5368 /* Don't reorder volatile memory references; it doesn't seem worth
5369 looking for the case where the order is ok anyway. */
5370 if (MEM_VOLATILE_P (operands[nops + i]))
5371 return 0;
5373 offset = const0_rtx;
5375 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5376 || (GET_CODE (reg) == SUBREG
5377 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5378 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5379 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5380 == REG)
5381 || (GET_CODE (reg) == SUBREG
5382 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5383 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5384 == CONST_INT)))
5386 if (i == 0)
5388 base_reg = REGNO (reg);
5389 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5390 ? REGNO (operands[i])
5391 : REGNO (SUBREG_REG (operands[i])));
5392 order[0] = 0;
5394 else
5396 if (base_reg != (int) REGNO (reg))
5397 /* Not addressed from the same base register. */
5398 return 0;
5400 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5401 ? REGNO (operands[i])
5402 : REGNO (SUBREG_REG (operands[i])));
5403 if (unsorted_regs[i] < unsorted_regs[order[0]])
5404 order[0] = i;
5407 /* If it isn't an integer register, or if it overwrites the
5408 base register but isn't the last insn in the list, then
5409 we can't do this. */
5410 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5411 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5412 return 0;
5414 unsorted_offsets[i] = INTVAL (offset);
5416 else
5417 /* Not a suitable memory address. */
5418 return 0;
5421 /* All the useful information has now been extracted from the
5422 operands into unsorted_regs and unsorted_offsets; additionally,
5423 order[0] has been set to the lowest numbered register in the
5424 list. Sort the registers into order, and check that the memory
5425 offsets are ascending and adjacent. */
5427 for (i = 1; i < nops; i++)
5429 int j;
5431 order[i] = order[i - 1];
5432 for (j = 0; j < nops; j++)
5433 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5434 && (order[i] == order[i - 1]
5435 || unsorted_regs[j] < unsorted_regs[order[i]]))
5436 order[i] = j;
5438 /* Have we found a suitable register? If not, one must be used more
5439 than once. */
5440 if (order[i] == order[i - 1])
5441 return 0;
5443 /* Is the memory address adjacent and ascending? */
5444 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5445 return 0;
5448 if (base)
5450 *base = base_reg;
5452 for (i = 0; i < nops; i++)
5453 regs[i] = unsorted_regs[order[i]];
5455 *load_offset = unsorted_offsets[order[0]];
5458 if (unsorted_offsets[order[0]] == 0)
5459 return 1; /* ldmia */
5461 if (unsorted_offsets[order[0]] == 4)
5462 return 2; /* ldmib */
5464 if (unsorted_offsets[order[nops - 1]] == 0)
5465 return 3; /* ldmda */
5467 if (unsorted_offsets[order[nops - 1]] == -4)
5468 return 4; /* ldmdb */
5470 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5471 if the offset isn't small enough. The reason 2 ldrs are faster
5472 is because these ARMs are able to do more than one cache access
5473 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5474 whilst the ARM8 has a double bandwidth cache. This means that
5475 these cores can do both an instruction fetch and a data fetch in
5476 a single cycle, so the trick of calculating the address into a
5477 scratch register (one of the result regs) and then doing a load
5478 multiple actually becomes slower (and no smaller in code size).
5479 That is the transformation
5481 ldr rd1, [rbase + offset]
5482 ldr rd2, [rbase + offset + 4]
5486 add rd1, rbase, offset
5487 ldmia rd1, {rd1, rd2}
5489 produces worse code -- '3 cycles + any stalls on rd2' instead of
5490 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5491 access per cycle, the first sequence could never complete in less
5492 than 6 cycles, whereas the ldm sequence would only take 5 and
5493 would make better use of sequential accesses if not hitting the
5494 cache.
5496 We cheat here and test 'arm_ld_sched' which we currently know to
5497 only be true for the ARM8, ARM9 and StrongARM. If this ever
5498 changes, then the test below needs to be reworked. */
5499 if (nops == 2 && arm_ld_sched)
5500 return 0;
5502 /* Can't do it without setting up the offset, only do this if it takes
5503 no more than one insn. */
5504 return (const_ok_for_arm (unsorted_offsets[order[0]])
5505 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
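/* For illustration: loads of r0, r1, r2 from [r4], [r4, #4], [r4, #8]
   return 1 (ldmia); offsets 4, 8, 12 return 2 (ldmib); -8, -4, 0
   return 3 (ldmda); and -12, -8, -4 return 4 (ldmdb).  Anything else
   whose base offset fits in a single add or sub returns 5, unless two
   plain ldrs are preferred as described above.  */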
5508 const char *
5509 emit_ldm_seq (rtx *operands, int nops)
5511 int regs[4];
5512 int base_reg;
5513 HOST_WIDE_INT offset;
5514 char buf[100];
5515 int i;
5517 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5519 case 1:
5520 strcpy (buf, "ldm%?ia\t");
5521 break;
5523 case 2:
5524 strcpy (buf, "ldm%?ib\t");
5525 break;
5527 case 3:
5528 strcpy (buf, "ldm%?da\t");
5529 break;
5531 case 4:
5532 strcpy (buf, "ldm%?db\t");
5533 break;
5535 case 5:
5536 if (offset >= 0)
5537 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5538 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5539 (long) offset);
5540 else
5541 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5542 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5543 (long) -offset);
5544 output_asm_insn (buf, operands);
5545 base_reg = regs[0];
5546 strcpy (buf, "ldm%?ia\t");
5547 break;
5549 default:
5550 abort ();
5553 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5554 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5556 for (i = 1; i < nops; i++)
5557 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5558 reg_names[regs[i]]);
5560 strcat (buf, "}\t%@ phole ldm");
5562 output_asm_insn (buf, operands);
5563 return "";
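/* Usage sketch: for return value 1 and registers r0-r2 based on r4 the
   code above emits "ldmia r4, {r0, r1, r2}" (plus the "@ phole ldm"
   comment); for value 5 it first materializes the base, e.g.
   "add r0, r4, #64" followed by "ldmia r0, {r0, r1}".  */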
5567 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5568 HOST_WIDE_INT * load_offset)
5570 int unsorted_regs[4];
5571 HOST_WIDE_INT unsorted_offsets[4];
5572 int order[4];
5573 int base_reg = -1;
5574 int i;
5576 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5577 extended if required. */
5578 if (nops < 2 || nops > 4)
5579 abort ();
5581 /* Loop over the operands and check that the memory references are
5582 suitable (i.e. immediate offsets from the same base register). At
5583 the same time, extract the target register, and the memory
5584 offsets. */
5585 for (i = 0; i < nops; i++)
5587 rtx reg;
5588 rtx offset;
5590 /* Convert a subreg of a mem into the mem itself. */
5591 if (GET_CODE (operands[nops + i]) == SUBREG)
5592 operands[nops + i] = alter_subreg (operands + (nops + i));
5594 if (GET_CODE (operands[nops + i]) != MEM)
5595 abort ();
5597 /* Don't reorder volatile memory references; it doesn't seem worth
5598 looking for the case where the order is ok anyway. */
5599 if (MEM_VOLATILE_P (operands[nops + i]))
5600 return 0;
5602 offset = const0_rtx;
5604 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5605 || (GET_CODE (reg) == SUBREG
5606 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5607 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5608 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5609 == REG)
5610 || (GET_CODE (reg) == SUBREG
5611 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5612 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5613 == CONST_INT)))
5615 if (i == 0)
5617 base_reg = REGNO (reg);
5618 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5619 ? REGNO (operands[i])
5620 : REGNO (SUBREG_REG (operands[i])));
5621 order[0] = 0;
5623 else
5625 if (base_reg != (int) REGNO (reg))
5626 /* Not addressed from the same base register. */
5627 return 0;
5629 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5630 ? REGNO (operands[i])
5631 : REGNO (SUBREG_REG (operands[i])));
5632 if (unsorted_regs[i] < unsorted_regs[order[0]])
5633 order[0] = i;
5636 /* If it isn't an integer register, then we can't do this. */
5637 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5638 return 0;
5640 unsorted_offsets[i] = INTVAL (offset);
5642 else
5643 /* Not a suitable memory address. */
5644 return 0;
5647 /* All the useful information has now been extracted from the
5648 operands into unsorted_regs and unsorted_offsets; additionally,
5649 order[0] has been set to the lowest numbered register in the
5650 list. Sort the registers into order, and check that the memory
5651 offsets are ascending and adjacent. */
5653 for (i = 1; i < nops; i++)
5655 int j;
5657 order[i] = order[i - 1];
5658 for (j = 0; j < nops; j++)
5659 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5660 && (order[i] == order[i - 1]
5661 || unsorted_regs[j] < unsorted_regs[order[i]]))
5662 order[i] = j;
5664 /* Have we found a suitable register? If not, one must be used more
5665 than once. */
5666 if (order[i] == order[i - 1])
5667 return 0;
5669 /* Is the memory address adjacent and ascending? */
5670 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5671 return 0;
5674 if (base)
5676 *base = base_reg;
5678 for (i = 0; i < nops; i++)
5679 regs[i] = unsorted_regs[order[i]];
5681 *load_offset = unsorted_offsets[order[0]];
5684 if (unsorted_offsets[order[0]] == 0)
5685 return 1; /* stmia */
5687 if (unsorted_offsets[order[0]] == 4)
5688 return 2; /* stmib */
5690 if (unsorted_offsets[order[nops - 1]] == 0)
5691 return 3; /* stmda */
5693 if (unsorted_offsets[order[nops - 1]] == -4)
5694 return 4; /* stmdb */
5696 return 0;
5699 const char *
5700 emit_stm_seq (rtx *operands, int nops)
5702 int regs[4];
5703 int base_reg;
5704 HOST_WIDE_INT offset;
5705 char buf[100];
5706 int i;
5708 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5710 case 1:
5711 strcpy (buf, "stm%?ia\t");
5712 break;
5714 case 2:
5715 strcpy (buf, "stm%?ib\t");
5716 break;
5718 case 3:
5719 strcpy (buf, "stm%?da\t");
5720 break;
5722 case 4:
5723 strcpy (buf, "stm%?db\t");
5724 break;
5726 default:
5727 abort ();
5730 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5731 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5733 for (i = 1; i < nops; i++)
5734 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5735 reg_names[regs[i]]);
5737 strcat (buf, "}\t%@ phole stm");
5739 output_asm_insn (buf, operands);
5740 return "";
5744 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5746 if (GET_CODE (op) != PARALLEL
5747 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5748 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5749 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5750 return 0;
5752 return 1;
5755 /* Routines for use in generating RTL. */
5758 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5759 int write_back, int unchanging_p, int in_struct_p,
5760 int scalar_p)
5762 int i = 0, j;
5763 rtx result;
5764 int sign = up ? 1 : -1;
5765 rtx mem;
5767 /* XScale has load-store double instructions, but they have stricter
5768 alignment requirements than load-store multiple, so we cannot
5769 use them.
5771 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5772 the pipeline until completion.
5774 NREGS CYCLES
5775 1 3
5776 2 4
5777 3 5
5778 4 6
5780 An ldr instruction takes 1-3 cycles, but does not block the
5781 pipeline.
5783 NREGS CYCLES
5784 1 1-3
5785 2 2-6
5786 3 3-9
5787 4 4-12
5789 Best case ldr will always win. However, the more ldr instructions
5790 we issue, the less likely we are to be able to schedule them well.
5791 Using ldr instructions also increases code size.
5793 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5794 for counts of 3 or 4 regs. */
5795 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5797 rtx seq;
5799 start_sequence ();
5801 for (i = 0; i < count; i++)
5803 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5804 RTX_UNCHANGING_P (mem) = unchanging_p;
5805 MEM_IN_STRUCT_P (mem) = in_struct_p;
5806 MEM_SCALAR_P (mem) = scalar_p;
5807 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5810 if (write_back)
5811 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5813 seq = get_insns ();
5814 end_sequence ();
5816 return seq;
5819 result = gen_rtx_PARALLEL (VOIDmode,
5820 rtvec_alloc (count + (write_back ? 1 : 0)));
5821 if (write_back)
5823 XVECEXP (result, 0, 0)
5824 = gen_rtx_SET (GET_MODE (from), from,
5825 plus_constant (from, count * 4 * sign));
5826 i = 1;
5827 count++;
5830 for (j = 0; i < count; i++, j++)
5832 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5833 RTX_UNCHANGING_P (mem) = unchanging_p;
5834 MEM_IN_STRUCT_P (mem) = in_struct_p;
5835 MEM_SCALAR_P (mem) = scalar_p;
5836 XVECEXP (result, 0, i)
5837 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5840 return result;
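/* Usage sketch: arm_gen_load_multiple (0, 3, from, TRUE, FALSE, ...)
   yields a three-element PARALLEL loading r0..r2 from FROM, FROM + 4
   and FROM + 8, which matches load_multiple_operation and is emitted
   as a single ldmia; on XScale the COUNT <= 2 path above emits plain
   ldr insns instead.  */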
5844 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5845 int write_back, int unchanging_p, int in_struct_p,
5846 int scalar_p)
5848 int i = 0, j;
5849 rtx result;
5850 int sign = up ? 1 : -1;
5851 rtx mem;
5853 /* See arm_gen_load_multiple for discussion of
5854 the pros/cons of ldm/stm usage for XScale. */
5855 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5857 rtx seq;
5859 start_sequence ();
5861 for (i = 0; i < count; i++)
5863 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5864 RTX_UNCHANGING_P (mem) = unchanging_p;
5865 MEM_IN_STRUCT_P (mem) = in_struct_p;
5866 MEM_SCALAR_P (mem) = scalar_p;
5867 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5870 if (write_back)
5871 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5873 seq = get_insns ();
5874 end_sequence ();
5876 return seq;
5879 result = gen_rtx_PARALLEL (VOIDmode,
5880 rtvec_alloc (count + (write_back ? 1 : 0)));
5881 if (write_back)
5883 XVECEXP (result, 0, 0)
5884 = gen_rtx_SET (GET_MODE (to), to,
5885 plus_constant (to, count * 4 * sign));
5886 i = 1;
5887 count++;
5890 for (j = 0; i < count; i++, j++)
5892 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5893 RTX_UNCHANGING_P (mem) = unchanging_p;
5894 MEM_IN_STRUCT_P (mem) = in_struct_p;
5895 MEM_SCALAR_P (mem) = scalar_p;
5897 XVECEXP (result, 0, i)
5898 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5901 return result;
5905 arm_gen_movstrqi (rtx *operands)
5907 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5908 int i;
5909 rtx src, dst;
5910 rtx st_src, st_dst, fin_src, fin_dst;
5911 rtx part_bytes_reg = NULL;
5912 rtx mem;
5913 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5914 int dst_scalar_p, src_scalar_p;
5916 if (GET_CODE (operands[2]) != CONST_INT
5917 || GET_CODE (operands[3]) != CONST_INT
5918 || INTVAL (operands[2]) > 64
5919 || INTVAL (operands[3]) & 3)
5920 return 0;
5922 st_dst = XEXP (operands[0], 0);
5923 st_src = XEXP (operands[1], 0);
5925 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5926 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5927 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5928 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5929 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5930 src_scalar_p = MEM_SCALAR_P (operands[1]);
5932 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5933 fin_src = src = copy_to_mode_reg (SImode, st_src);
5935 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5936 out_words_to_go = INTVAL (operands[2]) / 4;
5937 last_bytes = INTVAL (operands[2]) & 3;
5939 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5940 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5942 for (i = 0; in_words_to_go >= 2; i += 4)
5944 if (in_words_to_go > 4)
5945 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5946 src_unchanging_p,
5947 src_in_struct_p,
5948 src_scalar_p));
5949 else
5950 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5951 FALSE, src_unchanging_p,
5952 src_in_struct_p, src_scalar_p));
5954 if (out_words_to_go)
5956 if (out_words_to_go > 4)
5957 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5958 dst_unchanging_p,
5959 dst_in_struct_p,
5960 dst_scalar_p));
5961 else if (out_words_to_go != 1)
5962 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5963 dst, TRUE,
5964 (last_bytes == 0
5965 ? FALSE : TRUE),
5966 dst_unchanging_p,
5967 dst_in_struct_p,
5968 dst_scalar_p));
5969 else
5971 mem = gen_rtx_MEM (SImode, dst);
5972 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5973 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5974 MEM_SCALAR_P (mem) = dst_scalar_p;
5975 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5976 if (last_bytes != 0)
5977 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5981 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5982 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5985 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5986 if (out_words_to_go)
5988 rtx sreg;
5990 mem = gen_rtx_MEM (SImode, src);
5991 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5992 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5993 MEM_SCALAR_P (mem) = src_scalar_p;
5994 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5995 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5997 mem = gen_rtx_MEM (SImode, dst);
5998 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5999 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6000 MEM_SCALAR_P (mem) = dst_scalar_p;
6001 emit_move_insn (mem, sreg);
6002 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
6003 in_words_to_go--;
6005 if (in_words_to_go) /* Sanity check */
6006 abort ();
6009 if (in_words_to_go)
6011 if (in_words_to_go < 0)
6012 abort ();
6014 mem = gen_rtx_MEM (SImode, src);
6015 RTX_UNCHANGING_P (mem) = src_unchanging_p;
6016 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6017 MEM_SCALAR_P (mem) = src_scalar_p;
6018 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6021 if (last_bytes && part_bytes_reg == NULL)
6022 abort ();
6024 if (BYTES_BIG_ENDIAN && last_bytes)
6026 rtx tmp = gen_reg_rtx (SImode);
6028 /* The bytes we want are in the top end of the word. */
6029 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6030 GEN_INT (8 * (4 - last_bytes))));
6031 part_bytes_reg = tmp;
6033 while (last_bytes)
6035 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
6036 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6037 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6038 MEM_SCALAR_P (mem) = dst_scalar_p;
6039 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6041 if (--last_bytes)
6043 tmp = gen_reg_rtx (SImode);
6044 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6045 part_bytes_reg = tmp;
6050 else
6052 if (last_bytes > 1)
6054 mem = gen_rtx_MEM (HImode, dst);
6055 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6056 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6057 MEM_SCALAR_P (mem) = dst_scalar_p;
6058 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6059 last_bytes -= 2;
6060 if (last_bytes)
6062 rtx tmp = gen_reg_rtx (SImode);
6064 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6065 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6066 part_bytes_reg = tmp;
6070 if (last_bytes)
6072 mem = gen_rtx_MEM (QImode, dst);
6073 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6074 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6075 MEM_SCALAR_P (mem) = dst_scalar_p;
6076 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6080 return 1;
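/* A worked trace: a 6-byte copy gives IN_WORDS_TO_GO == 2,
   OUT_WORDS_TO_GO == 1 and LAST_BYTES == 2, so the loop loads two words
   into r0/r1 and stores one; on a little-endian target the leftover
   word in r1 then takes the HImode branch above, storing its low
   half.  */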
6083 /* Generate a memory reference for a half word, such that it will be loaded
6084 into the top 16 bits of the word. We can assume that the address is
6085 known to be alignable and of the form reg, or plus (reg, const). */
6088 arm_gen_rotated_half_load (rtx memref)
6090 HOST_WIDE_INT offset = 0;
6091 rtx base = XEXP (memref, 0);
6093 if (GET_CODE (base) == PLUS)
6095 offset = INTVAL (XEXP (base, 1));
6096 base = XEXP (base, 0);
6099 /* If we aren't allowed to generate unaligned addresses, then fail. */
6100 if (TARGET_MMU_TRAPS
6101 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
6102 return NULL;
6104 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6106 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6107 return base;
6109 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
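/* For illustration on a little-endian target: a halfword at [r4, #2]
   already occupies the top 16 bits of the word at r4, so the SImode
   reference is returned as-is; a halfword at [r4, #0] gets wrapped in
   a ROTATE by 16 so that it too ends up in bits 16-31.  */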
6112 /* Select a dominance comparison mode if possible for a test of the general
6113 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6114 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6115 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6116 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6117 In all cases OP will be either EQ or NE, but we don't need to know which
6118 here. If we are unable to support a dominance comparison we return
6119 CCmode. This will then fail to match for the RTL expressions that
6120 generate this call. */
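/* For example, (ior (eq r0 r1) (eq r2 r3)) under DOM_CC_X_OR_Y selects
   CC_DEQmode, and (and (lt ...) (lt ...)) under DOM_CC_X_AND_Y selects
   CC_DLTmode; mixed comparisons are only accepted when one condition
   dominates the other (see the switch below). */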
6121 enum machine_mode
6122 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6124 enum rtx_code cond1, cond2;
6125 int swapped = 0;
6127 /* Currently we will probably get the wrong result if the individual
6128 comparisons are not simple. This also ensures that it is safe to
6129 reverse a comparison if necessary. */
6130 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6131 != CCmode)
6132 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6133 != CCmode))
6134 return CCmode;
6136 /* The if_then_else variant of this tests the second condition if the
6137 first passes, but is true if the first fails. Reverse the first
6138 condition to get a true "inclusive-or" expression. */
6139 if (cond_or == DOM_CC_NX_OR_Y)
6140 cond1 = reverse_condition (cond1);
6142 /* If the comparisons are not equal, and one doesn't dominate the other,
6143 then we can't do this. */
6144 if (cond1 != cond2
6145 && !comparison_dominates_p (cond1, cond2)
6146 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6147 return CCmode;
6149 if (swapped)
6151 enum rtx_code temp = cond1;
6152 cond1 = cond2;
6153 cond2 = temp;
6156 switch (cond1)
6158 case EQ:
6159 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6160 return CC_DEQmode;
6162 switch (cond2)
6164 case LE: return CC_DLEmode;
6165 case LEU: return CC_DLEUmode;
6166 case GE: return CC_DGEmode;
6167 case GEU: return CC_DGEUmode;
6168 default: break;
6171 break;
6173 case LT:
6174 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6175 return CC_DLTmode;
6176 if (cond2 == LE)
6177 return CC_DLEmode;
6178 if (cond2 == NE)
6179 return CC_DNEmode;
6180 break;
6182 case GT:
6183 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6184 return CC_DGTmode;
6185 if (cond2 == GE)
6186 return CC_DGEmode;
6187 if (cond2 == NE)
6188 return CC_DNEmode;
6189 break;
6191 case LTU:
6192 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6193 return CC_DLTUmode;
6194 if (cond2 == LEU)
6195 return CC_DLEUmode;
6196 if (cond2 == NE)
6197 return CC_DNEmode;
6198 break;
6200 case GTU:
6201 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6202 return CC_DGTUmode;
6203 if (cond2 == GEU)
6204 return CC_DGEUmode;
6205 if (cond2 == NE)
6206 return CC_DNEmode;
6207 break;
6209 /* The remaining cases only occur when both comparisons are the
6210 same. */
6211 case NE:
6212 return CC_DNEmode;
6214 case LE:
6215 return CC_DLEmode;
6217 case GE:
6218 return CC_DGEmode;
6220 case LEU:
6221 return CC_DLEUmode;
6223 case GEU:
6224 return CC_DGEUmode;
6226 default:
6227 break;
6230 abort ();
6233 enum machine_mode
6234 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6236 /* All floating point compares return CCFP if it is an equality
6237 comparison, and CCFPE otherwise. */
6238 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6240 switch (op)
6242 case EQ:
6243 case NE:
6244 case UNORDERED:
6245 case ORDERED:
6246 case UNLT:
6247 case UNLE:
6248 case UNGT:
6249 case UNGE:
6250 case UNEQ:
6251 case LTGT:
6252 return CCFPmode;
6254 case LT:
6255 case LE:
6256 case GT:
6257 case GE:
6258 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6259 return CCFPmode;
6260 return CCFPEmode;
6262 default:
6263 abort ();
6267 /* A compare with a shifted operand. Because of canonicalization, the
6268 comparison will have to be swapped when we emit the assembler. */
6269 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6270 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6271 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6272 || GET_CODE (x) == ROTATERT))
6273 return CC_SWPmode;
6275 /* This is a special case that is used by combine to allow a
6276 comparison of a shifted byte load to be split into a zero-extend
6277 followed by a comparison of the shifted integer (only valid for
6278 equalities and unsigned inequalities). */
6279 if (GET_MODE (x) == SImode
6280 && GET_CODE (x) == ASHIFT
6281 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6282 && GET_CODE (XEXP (x, 0)) == SUBREG
6283 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6284 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6285 && (op == EQ || op == NE
6286 || op == GEU || op == GTU || op == LTU || op == LEU)
6287 && GET_CODE (y) == CONST_INT)
6288 return CC_Zmode;
6290 /* A construct for a conditional compare: if the false arm contains
6291 0, then both conditions must be true, otherwise either condition
6292 must be true. Not all conditions are possible, so CCmode is
6293 returned if it can't be done. */
6294 if (GET_CODE (x) == IF_THEN_ELSE
6295 && (XEXP (x, 2) == const0_rtx
6296 || XEXP (x, 2) == const1_rtx)
6297 && COMPARISON_P (XEXP (x, 0))
6298 && COMPARISON_P (XEXP (x, 1)))
6299 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6300 INTVAL (XEXP (x, 2)));
6302 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6303 if (GET_CODE (x) == AND
6304 && COMPARISON_P (XEXP (x, 0))
6305 && COMPARISON_P (XEXP (x, 1)))
6306 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6307 DOM_CC_X_AND_Y);
6309 if (GET_CODE (x) == IOR
6310 && COMPARISON_P (XEXP (x, 0))
6311 && COMPARISON_P (XEXP (x, 1)))
6312 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6313 DOM_CC_X_OR_Y);
6315 /* An operation (on Thumb) where we want to test for a single bit.
6316 This is done by shifting that bit up into the top bit of a
6317 scratch register; we can then branch on the sign bit. */
6318 if (TARGET_THUMB
6319 && GET_MODE (x) == SImode
6320 && (op == EQ || op == NE)
6321 && (GET_CODE (x) == ZERO_EXTRACT))
6322 return CC_Nmode;
6324 /* For an operation that sets the condition codes as a side-effect, the
6325 V flag is not set correctly, so we can only use comparisons where
6326 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6327 instead.) */
6328 if (GET_MODE (x) == SImode
6329 && y == const0_rtx
6330 && (op == EQ || op == NE || op == LT || op == GE)
6331 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6332 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6333 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6334 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6335 || GET_CODE (x) == LSHIFTRT
6336 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6337 || GET_CODE (x) == ROTATERT
6338 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6339 return CC_NOOVmode;
6341 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6342 return CC_Zmode;
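/* An unsigned comparison of a sum against one of its operands tests the
   carry flag: (ltu (plus a b) a) is the idiom for detecting overflow of
   an unsigned addition, hence CC_Cmode below. */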
6344 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6345 && GET_CODE (x) == PLUS
6346 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6347 return CC_Cmode;
6349 return CCmode;
6352 /* X and Y are two things to compare using CODE. Emit the compare insn and
6353 return the rtx for the CC register in the proper mode. (A floating-point
6354 flag is not needed on the ARM: SELECT_CC_MODE handles FP comparisons.) */
6356 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6358 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6359 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6361 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6362 gen_rtx_COMPARE (mode, x, y)));
6364 return cc_reg;
6367 /* Generate a sequence of insns that will generate the correct return
6368 address mask depending on the physical architecture that the program
6369 is running on. */
6371 arm_gen_return_addr_mask (void)
6373 rtx reg = gen_reg_rtx (Pmode);
6375 emit_insn (gen_return_addr_mask (reg));
6376 return reg;
6379 void
6380 arm_reload_in_hi (rtx *operands)
6382 rtx ref = operands[1];
6383 rtx base, scratch;
6384 HOST_WIDE_INT offset = 0;
6386 if (GET_CODE (ref) == SUBREG)
6388 offset = SUBREG_BYTE (ref);
6389 ref = SUBREG_REG (ref);
6392 if (GET_CODE (ref) == REG)
6394 /* We have a pseudo which has been spilt onto the stack; there
6395 are two cases here: the first where there is a simple
6396 stack-slot replacement and a second where the stack-slot is
6397 out of range, or is used as a subreg. */
6398 if (reg_equiv_mem[REGNO (ref)])
6400 ref = reg_equiv_mem[REGNO (ref)];
6401 base = find_replacement (&XEXP (ref, 0));
6403 else
6404 /* The slot is out of range, or was dressed up in a SUBREG. */
6405 base = reg_equiv_address[REGNO (ref)];
6407 else
6408 base = find_replacement (&XEXP (ref, 0));
6410 /* Handle the case where the address is too complex to be offset by 1. */
6411 if (GET_CODE (base) == MINUS
6412 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6414 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6416 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6417 base = base_plus;
6419 else if (GET_CODE (base) == PLUS)
6421 /* The addend must be CONST_INT, or we would have dealt with it above. */
6422 HOST_WIDE_INT hi, lo;
6424 offset += INTVAL (XEXP (base, 1));
6425 base = XEXP (base, 0);
6427 /* Rework the address into a legal sequence of insns. */
6428 /* Valid range for lo is -4095 -> 4095 */
6429 lo = (offset >= 0
6430 ? (offset & 0xfff)
6431 : -((-offset) & 0xfff));
6433 /* Corner case: if lo is the max offset, then we would be out of range
6434 once we have added the additional 1 below, so bump the msb into the
6435 pre-loading insn(s). */
6436 if (lo == 4095)
6437 lo &= 0x7ff;
6439 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6440 ^ (HOST_WIDE_INT) 0x80000000)
6441 - (HOST_WIDE_INT) 0x80000000);
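/* For example, offset 8191 gives lo = 4095, which the corner case above
   reduces to 2047, leaving hi = 6144; both offset and offset + 1 then
   remain addressable once BASE has been advanced by hi. */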
6443 if (hi + lo != offset)
6444 abort ();
6446 if (hi != 0)
6448 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6450 /* Get the base address; addsi3 knows how to handle constants
6451 that require more than one insn. */
6452 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6453 base = base_plus;
6454 offset = lo;
6458 /* Operands[2] may overlap operands[0] (though it won't overlap
6459 operands[1]), that's why we asked for a DImode reg -- so we can
6460 use the bit that does not overlap. */
6461 if (REGNO (operands[2]) == REGNO (operands[0]))
6462 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6463 else
6464 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6466 emit_insn (gen_zero_extendqisi2 (scratch,
6467 gen_rtx_MEM (QImode,
6468 plus_constant (base,
6469 offset))));
6470 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6471 gen_rtx_MEM (QImode,
6472 plus_constant (base,
6473 offset + 1))));
6474 if (!BYTES_BIG_ENDIAN)
6475 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6476 gen_rtx_IOR (SImode,
6477 gen_rtx_ASHIFT
6478 (SImode,
6479 gen_rtx_SUBREG (SImode, operands[0], 0),
6480 GEN_INT (8)),
6481 scratch)));
6482 else
6483 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6484 gen_rtx_IOR (SImode,
6485 gen_rtx_ASHIFT (SImode, scratch,
6486 GEN_INT (8)),
6487 gen_rtx_SUBREG (SImode, operands[0],
6488 0))));
6491 /* Handle storing a half-word to memory during reload by synthesizing it as two
6492 byte stores. Take care not to clobber the input values until after we
6493 have moved them somewhere safe. This code assumes that if the DImode
6494 scratch in operands[2] overlaps either the input value or output address
6495 in some way, then that value must die in this insn (we absolutely need
6496 two scratch registers for some corner cases). */
6497 void
6498 arm_reload_out_hi (rtx *operands)
6500 rtx ref = operands[0];
6501 rtx outval = operands[1];
6502 rtx base, scratch;
6503 HOST_WIDE_INT offset = 0;
6505 if (GET_CODE (ref) == SUBREG)
6507 offset = SUBREG_BYTE (ref);
6508 ref = SUBREG_REG (ref);
6511 if (GET_CODE (ref) == REG)
6513 /* We have a pseudo which has been spilt onto the stack; there
6514 are two cases here: the first where there is a simple
6515 stack-slot replacement and a second where the stack-slot is
6516 out of range, or is used as a subreg. */
6517 if (reg_equiv_mem[REGNO (ref)])
6519 ref = reg_equiv_mem[REGNO (ref)];
6520 base = find_replacement (&XEXP (ref, 0));
6522 else
6523 /* The slot is out of range, or was dressed up in a SUBREG. */
6524 base = reg_equiv_address[REGNO (ref)];
6526 else
6527 base = find_replacement (&XEXP (ref, 0));
6529 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6531 /* Handle the case where the address is too complex to be offset by 1. */
6532 if (GET_CODE (base) == MINUS
6533 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6535 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6537 /* Be careful not to destroy OUTVAL. */
6538 if (reg_overlap_mentioned_p (base_plus, outval))
6540 /* Updating base_plus might destroy outval; see if we can
6541 swap the scratch and base_plus. */
6542 if (!reg_overlap_mentioned_p (scratch, outval))
6544 rtx tmp = scratch;
6545 scratch = base_plus;
6546 base_plus = tmp;
6548 else
6550 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6552 /* Be conservative and copy OUTVAL into the scratch now;
6553 this should only be necessary if outval is a subreg
6554 of something larger than a word. */
6555 /* XXX Might this clobber base? I can't see how it can,
6556 since scratch is known to overlap with OUTVAL, and
6557 must be wider than a word. */
6558 emit_insn (gen_movhi (scratch_hi, outval));
6559 outval = scratch_hi;
6563 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6564 base = base_plus;
6566 else if (GET_CODE (base) == PLUS)
6568 /* The addend must be CONST_INT, or we would have dealt with it above. */
6569 HOST_WIDE_INT hi, lo;
6571 offset += INTVAL (XEXP (base, 1));
6572 base = XEXP (base, 0);
6574 /* Rework the address into a legal sequence of insns. */
6575 /* Valid range for lo is -4095 -> 4095 */
6576 lo = (offset >= 0
6577 ? (offset & 0xfff)
6578 : -((-offset) & 0xfff));
6580 /* Corner case: if lo is the max offset, then we would be out of range
6581 once we have added the additional 1 below, so bump the msb into the
6582 pre-loading insn(s). */
6583 if (lo == 4095)
6584 lo &= 0x7ff;
6586 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6587 ^ (HOST_WIDE_INT) 0x80000000)
6588 - (HOST_WIDE_INT) 0x80000000);
6590 if (hi + lo != offset)
6591 abort ();
6593 if (hi != 0)
6595 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6597 /* Be careful not to destroy OUTVAL. */
6598 if (reg_overlap_mentioned_p (base_plus, outval))
6600 /* Updating base_plus might destroy outval; see if we
6601 can swap the scratch and base_plus. */
6602 if (!reg_overlap_mentioned_p (scratch, outval))
6604 rtx tmp = scratch;
6605 scratch = base_plus;
6606 base_plus = tmp;
6608 else
6610 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6612 /* Be conservative and copy outval into scratch now;
6613 this should only be necessary if outval is a
6614 subreg of something larger than a word. */
6615 /* XXX Might this clobber base? I can't see how it
6616 can, since scratch is known to overlap with
6617 outval. */
6618 emit_insn (gen_movhi (scratch_hi, outval));
6619 outval = scratch_hi;
6623 /* Get the base address; addsi3 knows how to handle constants
6624 that require more than one insn. */
6625 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6626 base = base_plus;
6627 offset = lo;
6631 if (BYTES_BIG_ENDIAN)
6633 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6634 plus_constant (base, offset + 1)),
6635 gen_lowpart (QImode, outval)));
6636 emit_insn (gen_lshrsi3 (scratch,
6637 gen_rtx_SUBREG (SImode, outval, 0),
6638 GEN_INT (8)));
6639 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6640 gen_lowpart (QImode, scratch)));
6642 else
6644 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6645 gen_lowpart (QImode, outval)));
6646 emit_insn (gen_lshrsi3 (scratch,
6647 gen_rtx_SUBREG (SImode, outval, 0),
6648 GEN_INT (8)));
6649 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6650 plus_constant (base, offset + 1)),
6651 gen_lowpart (QImode, scratch)));
6655 /* Print a symbolic form of X to the debug file, F. */
6656 static void
6657 arm_print_value (FILE *f, rtx x)
6659 switch (GET_CODE (x))
6661 case CONST_INT:
6662 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6663 return;
6665 case CONST_DOUBLE:
6666 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6667 return;
6669 case CONST_VECTOR:
6671 int i;
6673 fprintf (f, "<");
6674 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6676 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6677 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6678 fputc (',', f);
6680 fprintf (f, ">");
6682 return;
6684 case CONST_STRING:
6685 fprintf (f, "\"%s\"", XSTR (x, 0));
6686 return;
6688 case SYMBOL_REF:
6689 fprintf (f, "`%s'", XSTR (x, 0));
6690 return;
6692 case LABEL_REF:
6693 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6694 return;
6696 case CONST:
6697 arm_print_value (f, XEXP (x, 0));
6698 return;
6700 case PLUS:
6701 arm_print_value (f, XEXP (x, 0));
6702 fprintf (f, "+");
6703 arm_print_value (f, XEXP (x, 1));
6704 return;
6706 case PC:
6707 fprintf (f, "pc");
6708 return;
6710 default:
6711 fprintf (f, "????");
6712 return;
6716 /* Routines for manipulation of the constant pool. */
6718 /* Arm instructions cannot load a large constant directly into a
6719 register; they have to come from a pc relative load. The constant
6720 must therefore be placed in the addressable range of the pc
6721 relative load. Depending on the precise pc relative load
6722 instruction the range is somewhere between 256 bytes and 4k. This
6723 means that we often have to dump a constant inside a function, and
6724 generate code to branch around it.
6726 It is important to minimize this, since the branches will slow
6727 things down and make the code larger.
6729 Normally we can hide the table after an existing unconditional
6730 branch so that there is no interruption of the flow, but in the
6731 worst case the code looks like this:
6733 ldr rn, L1
6735 b L2
6736 align
6737 L1: .long value
6741 ldr rn, L3
6743 b L4
6744 align
6745 L3: .long value
6749 We fix this by performing a scan after scheduling, which notices
6750 which instructions need to have their operands fetched from the
6751 constant table and builds the table.
6753 The algorithm starts by building a table of all the constants that
6754 need fixing up and all the natural barriers in the function (places
6755 where a constant table can be dropped without breaking the flow).
6756 For each fixup we note how far the pc-relative replacement will be
6757 able to reach and the offset of the instruction into the function.
6759 Having built the table we then group the fixes together to form
6760 tables that are as large as possible (subject to addressing
6761 constraints) and emit each table of constants after the last
6762 barrier that is within range of all the instructions in the group.
6763 If a group does not contain a barrier, then we forcibly create one
6764 by inserting a jump instruction into the flow. Once the table has
6765 been inserted, the insns are then modified to reference the
6766 relevant entry in the pool.
6768 Possible enhancements to the algorithm (not implemented) are:
6770 1) For some processors and object formats, there may be benefit in
6771 aligning the pools to the start of cache lines; this alignment
6772 would need to be taken into account when calculating addressability
6773 of a pool. */
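/* The scan described above is driven by arm_reorg() below;
   push_minipool_fix() and push_minipool_barrier() build the table of
   fixes and barriers, and dump_minipool() emits each finished pool. */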
6775 /* These typedefs are located at the start of this file, so that
6776 they can be used in the prototypes there. This comment is to
6777 remind readers of that fact so that the following structures
6778 can be understood more easily.
6780 typedef struct minipool_node Mnode;
6781 typedef struct minipool_fixup Mfix; */
6783 struct minipool_node
6785 /* Doubly linked chain of entries. */
6786 Mnode * next;
6787 Mnode * prev;
6788 /* The maximum offset into the code at which this entry can be placed. While
6789 pushing fixes for forward references, all entries are sorted in order
6790 of increasing max_address. */
6791 HOST_WIDE_INT max_address;
6792 /* Similarly for an entry inserted for a backwards ref. */
6793 HOST_WIDE_INT min_address;
6794 /* The number of fixes referencing this entry. This can become zero
6795 if we "unpush" an entry. In this case we ignore the entry when we
6796 come to emit the code. */
6797 int refcount;
6798 /* The offset from the start of the minipool. */
6799 HOST_WIDE_INT offset;
6800 /* The value in the table. */
6801 rtx value;
6802 /* The mode of value. */
6803 enum machine_mode mode;
6804 /* The size of the value. With iWMMXt enabled
6805 sizes > 4 also imply an alignment of 8 bytes. */
6806 int fix_size;
6809 struct minipool_fixup
6811 Mfix * next;
6812 rtx insn;
6813 HOST_WIDE_INT address;
6814 rtx * loc;
6815 enum machine_mode mode;
6816 int fix_size;
6817 rtx value;
6818 Mnode * minipool;
6819 HOST_WIDE_INT forwards;
6820 HOST_WIDE_INT backwards;
6823 /* Fixes less than a word need padding out to a word boundary. */
6824 #define MINIPOOL_FIX_SIZE(mode) \
6825 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
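/* Thus MINIPOOL_FIX_SIZE (HImode) == 4, while MINIPOOL_FIX_SIZE (DImode) == 8. */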
6827 static Mnode * minipool_vector_head;
6828 static Mnode * minipool_vector_tail;
6829 static rtx minipool_vector_label;
6831 /* The linked list of all minipool fixes required for this function. */
6832 Mfix * minipool_fix_head;
6833 Mfix * minipool_fix_tail;
6834 /* The fix entry for the current minipool, once it has been placed. */
6835 Mfix * minipool_barrier;
6837 /* Determines if INSN is the start of a jump table. Returns the end
6838 of the TABLE or NULL_RTX. */
6839 static rtx
6840 is_jump_table (rtx insn)
6842 rtx table;
6844 if (GET_CODE (insn) == JUMP_INSN
6845 && JUMP_LABEL (insn) != NULL
6846 && ((table = next_real_insn (JUMP_LABEL (insn)))
6847 == next_real_insn (insn))
6848 && table != NULL
6849 && GET_CODE (table) == JUMP_INSN
6850 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6851 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6852 return table;
6854 return NULL_RTX;
6857 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6858 #define JUMP_TABLES_IN_TEXT_SECTION 0
6859 #endif
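/* Return the size in bytes of the jump table INSN (an ADDR_VEC or
   ADDR_DIFF_VEC insn); e.g. an SImode ADDR_DIFF_VEC with ten elements
   occupies 40 bytes. The table needs no room at all when read-only
   data is placed in its own section. */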
6861 static HOST_WIDE_INT
6862 get_jump_table_size (rtx insn)
6864 /* ADDR_VECs only take room if read-only data goes into the text
6865 section. */
6866 if (JUMP_TABLES_IN_TEXT_SECTION
6867 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6868 || 1
6869 #endif
6872 rtx body = PATTERN (insn);
6873 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6875 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6878 return 0;
6881 /* Move a minipool fix MP from its current location to before MAX_MP.
6882 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6883 constraints may need updating. */
6884 static Mnode *
6885 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6886 HOST_WIDE_INT max_address)
6888 /* This should never be true, and the code below assumes these are
6889 different. */
6890 if (mp == max_mp)
6891 abort ();
6893 if (max_mp == NULL)
6895 if (max_address < mp->max_address)
6896 mp->max_address = max_address;
6898 else
6900 if (max_address > max_mp->max_address - mp->fix_size)
6901 mp->max_address = max_mp->max_address - mp->fix_size;
6902 else
6903 mp->max_address = max_address;
6905 /* Unlink MP from its current position. Since max_mp is non-null,
6906 mp->prev must be non-null. */
6907 mp->prev->next = mp->next;
6908 if (mp->next != NULL)
6909 mp->next->prev = mp->prev;
6910 else
6911 minipool_vector_tail = mp->prev;
6913 /* Re-insert it before MAX_MP. */
6914 mp->next = max_mp;
6915 mp->prev = max_mp->prev;
6916 max_mp->prev = mp;
6918 if (mp->prev != NULL)
6919 mp->prev->next = mp;
6920 else
6921 minipool_vector_head = mp;
6924 /* Save the new entry. */
6925 max_mp = mp;
6927 /* Scan over the preceding entries and adjust their addresses as
6928 required. */
6929 while (mp->prev != NULL
6930 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6932 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6933 mp = mp->prev;
6936 return max_mp;
6939 /* Add a constant to the minipool for a forward reference. Returns the
6940 node added or NULL if the constant will not fit in this pool. */
6941 static Mnode *
6942 add_minipool_forward_ref (Mfix *fix)
6944 /* If set, max_mp is the first pool_entry that has a lower
6945 constraint than the one we are trying to add. */
6946 Mnode * max_mp = NULL;
6947 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6948 Mnode * mp;
6950 /* If this fix's address is greater than the address of the first
6951 entry, then we can't put the fix in this pool. We subtract the
6952 size of the current fix to ensure that if the table is fully
6953 packed we still have enough room to insert this value by shuffling
6954 the other fixes forwards. */
6955 if (minipool_vector_head &&
6956 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6957 return NULL;
6959 /* Scan the pool to see if a constant with the same value has
6960 already been added. While we are doing this, also note the
6961 location where we must insert the constant if it doesn't already
6962 exist. */
6963 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6965 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6966 && fix->mode == mp->mode
6967 && (GET_CODE (fix->value) != CODE_LABEL
6968 || (CODE_LABEL_NUMBER (fix->value)
6969 == CODE_LABEL_NUMBER (mp->value)))
6970 && rtx_equal_p (fix->value, mp->value))
6972 /* More than one fix references this entry. */
6973 mp->refcount++;
6974 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6977 /* Note the insertion point if necessary. */
6978 if (max_mp == NULL
6979 && mp->max_address > max_address)
6980 max_mp = mp;
6982 /* If we are inserting an 8-byte aligned quantity and
6983 we have not already found an insertion point, then
6984 make sure that all such 8-byte aligned quantities are
6985 placed at the start of the pool. */
6986 if (ARM_DOUBLEWORD_ALIGN
6987 && max_mp == NULL
6988 && fix->fix_size == 8
6989 && mp->fix_size != 8)
6991 max_mp = mp;
6992 max_address = mp->max_address;
6996 /* The value is not currently in the minipool, so we need to create
6997 a new entry for it. If MAX_MP is NULL, the entry will be put on
6998 the end of the list since the placement is less constrained than
6999 any existing entry. Otherwise, we insert the new fix before
7000 MAX_MP and, if necessary, adjust the constraints on the other
7001 entries. */
7002 mp = xmalloc (sizeof (* mp));
7003 mp->fix_size = fix->fix_size;
7004 mp->mode = fix->mode;
7005 mp->value = fix->value;
7006 mp->refcount = 1;
7007 /* Not yet required for a backwards ref. */
7008 mp->min_address = -65536;
7010 if (max_mp == NULL)
7012 mp->max_address = max_address;
7013 mp->next = NULL;
7014 mp->prev = minipool_vector_tail;
7016 if (mp->prev == NULL)
7018 minipool_vector_head = mp;
7019 minipool_vector_label = gen_label_rtx ();
7021 else
7022 mp->prev->next = mp;
7024 minipool_vector_tail = mp;
7026 else
7028 if (max_address > max_mp->max_address - mp->fix_size)
7029 mp->max_address = max_mp->max_address - mp->fix_size;
7030 else
7031 mp->max_address = max_address;
7033 mp->next = max_mp;
7034 mp->prev = max_mp->prev;
7035 max_mp->prev = mp;
7036 if (mp->prev != NULL)
7037 mp->prev->next = mp;
7038 else
7039 minipool_vector_head = mp;
7042 /* Save the new entry. */
7043 max_mp = mp;
7045 /* Scan over the preceding entries and adjust their addresses as
7046 required. */
7047 while (mp->prev != NULL
7048 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7050 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7051 mp = mp->prev;
7054 return max_mp;
7057 static Mnode *
7058 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7059 HOST_WIDE_INT min_address)
7061 HOST_WIDE_INT offset;
7063 /* This should never be true, and the code below assumes these are
7064 different. */
7065 if (mp == min_mp)
7066 abort ();
7068 if (min_mp == NULL)
7070 if (min_address > mp->min_address)
7071 mp->min_address = min_address;
7073 else
7075 /* We will adjust this below if it is too loose. */
7076 mp->min_address = min_address;
7078 /* Unlink MP from its current position. Since min_mp is non-null,
7079 mp->next must be non-null. */
7080 mp->next->prev = mp->prev;
7081 if (mp->prev != NULL)
7082 mp->prev->next = mp->next;
7083 else
7084 minipool_vector_head = mp->next;
7086 /* Reinsert it after MIN_MP. */
7087 mp->prev = min_mp;
7088 mp->next = min_mp->next;
7089 min_mp->next = mp;
7090 if (mp->next != NULL)
7091 mp->next->prev = mp;
7092 else
7093 minipool_vector_tail = mp;
7096 min_mp = mp;
7098 offset = 0;
7099 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7101 mp->offset = offset;
7102 if (mp->refcount > 0)
7103 offset += mp->fix_size;
7105 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7106 mp->next->min_address = mp->min_address + mp->fix_size;
7109 return min_mp;
7112 /* Add a constant to the minipool for a backward reference. Returns the
7113 node added or NULL if the constant will not fit in this pool.
7115 Note that the code for insertion for a backwards reference can be
7116 somewhat confusing because the calculated offsets for each fix do
7117 not take into account the size of the pool (which is still under
7118 construction). */
7119 static Mnode *
7120 add_minipool_backward_ref (Mfix *fix)
7122 /* If set, min_mp is the last pool_entry that has a lower constraint
7123 than the one we are trying to add. */
7124 Mnode *min_mp = NULL;
7125 /* This can be negative, since it is only a constraint. */
7126 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7127 Mnode *mp;
7129 /* If we can't reach the current pool from this insn, or if we can't
7130 insert this entry at the end of the pool without pushing other
7131 fixes out of range, then we don't try. This ensures that we
7132 can't fail later on. */
7133 if (min_address >= minipool_barrier->address
7134 || (minipool_vector_tail->min_address + fix->fix_size
7135 >= minipool_barrier->address))
7136 return NULL;
7138 /* Scan the pool to see if a constant with the same value has
7139 already been added. While we are doing this, also note the
7140 location where we must insert the constant if it doesn't already
7141 exist. */
7142 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7144 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7145 && fix->mode == mp->mode
7146 && (GET_CODE (fix->value) != CODE_LABEL
7147 || (CODE_LABEL_NUMBER (fix->value)
7148 == CODE_LABEL_NUMBER (mp->value)))
7149 && rtx_equal_p (fix->value, mp->value)
7150 /* Check that there is enough slack to move this entry to the
7151 end of the table (this is conservative). */
7152 && (mp->max_address
7153 > (minipool_barrier->address
7154 + minipool_vector_tail->offset
7155 + minipool_vector_tail->fix_size)))
7157 mp->refcount++;
7158 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7161 if (min_mp != NULL)
7162 mp->min_address += fix->fix_size;
7163 else
7165 /* Note the insertion point if necessary. */
7166 if (mp->min_address < min_address)
7168 /* For now, we do not allow the insertion of nodes requiring 8-byte
7169 alignment anywhere but at the start of the pool. */
7170 if (ARM_DOUBLEWORD_ALIGN
7171 && fix->fix_size == 8 && mp->fix_size != 8)
7172 return NULL;
7173 else
7174 min_mp = mp;
7176 else if (mp->max_address
7177 < minipool_barrier->address + mp->offset + fix->fix_size)
7179 /* Inserting before this entry would push the fix beyond
7180 its maximum address (which can happen if we have
7181 re-located a forwards fix); force the new fix to come
7182 after it. */
7183 min_mp = mp;
7184 min_address = mp->min_address + fix->fix_size;
7186 /* If we are inserting an 8-byte aligned quantity and
7187 we have not already found an insertion point, then
7188 make sure that all such 8-byte aligned quantities are
7189 placed at the start of the pool. */
7190 else if (ARM_DOUBLEWORD_ALIGN
7191 && min_mp == NULL
7192 && fix->fix_size == 8
7193 && mp->fix_size < 8)
7195 min_mp = mp;
7196 min_address = mp->min_address + fix->fix_size;
7201 /* We need to create a new entry. */
7202 mp = xmalloc (sizeof (* mp));
7203 mp->fix_size = fix->fix_size;
7204 mp->mode = fix->mode;
7205 mp->value = fix->value;
7206 mp->refcount = 1;
7207 mp->max_address = minipool_barrier->address + 65536;
7209 mp->min_address = min_address;
7211 if (min_mp == NULL)
7213 mp->prev = NULL;
7214 mp->next = minipool_vector_head;
7216 if (mp->next == NULL)
7218 minipool_vector_tail = mp;
7219 minipool_vector_label = gen_label_rtx ();
7221 else
7222 mp->next->prev = mp;
7224 minipool_vector_head = mp;
7226 else
7228 mp->next = min_mp->next;
7229 mp->prev = min_mp;
7230 min_mp->next = mp;
7232 if (mp->next != NULL)
7233 mp->next->prev = mp;
7234 else
7235 minipool_vector_tail = mp;
7238 /* Save the new entry. */
7239 min_mp = mp;
7241 if (mp->prev)
7242 mp = mp->prev;
7243 else
7244 mp->offset = 0;
7246 /* Scan over the following entries and adjust their offsets. */
7247 while (mp->next != NULL)
7249 if (mp->next->min_address < mp->min_address + mp->fix_size)
7250 mp->next->min_address = mp->min_address + mp->fix_size;
7252 if (mp->refcount)
7253 mp->next->offset = mp->offset + mp->fix_size;
7254 else
7255 mp->next->offset = mp->offset;
7257 mp = mp->next;
7260 return min_mp;
7263 static void
7264 assign_minipool_offsets (Mfix *barrier)
7266 HOST_WIDE_INT offset = 0;
7267 Mnode *mp;
7269 minipool_barrier = barrier;
7271 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7273 mp->offset = offset;
7275 if (mp->refcount > 0)
7276 offset += mp->fix_size;
7280 /* Output the literal table. */
7281 static void
7282 dump_minipool (rtx scan)
7284 Mnode * mp;
7285 Mnode * nmp;
7286 int align64 = 0;
7288 if (ARM_DOUBLEWORD_ALIGN)
7289 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7290 if (mp->refcount > 0 && mp->fix_size == 8)
7292 align64 = 1;
7293 break;
7296 if (dump_file)
7297 fprintf (dump_file,
7298 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7299 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7301 scan = emit_label_after (gen_label_rtx (), scan);
7302 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7303 scan = emit_label_after (minipool_vector_label, scan);
7305 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7307 if (mp->refcount > 0)
7309 if (dump_file)
7311 fprintf (dump_file,
7312 ";; Offset %u, min %ld, max %ld ",
7313 (unsigned) mp->offset, (unsigned long) mp->min_address,
7314 (unsigned long) mp->max_address);
7315 arm_print_value (dump_file, mp->value);
7316 fputc ('\n', dump_file);
7319 switch (mp->fix_size)
7321 #ifdef HAVE_consttable_1
7322 case 1:
7323 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7324 break;
7326 #endif
7327 #ifdef HAVE_consttable_2
7328 case 2:
7329 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7330 break;
7332 #endif
7333 #ifdef HAVE_consttable_4
7334 case 4:
7335 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7336 break;
7338 #endif
7339 #ifdef HAVE_consttable_8
7340 case 8:
7341 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7342 break;
7344 #endif
7345 default:
7346 abort ();
7347 break;
7351 nmp = mp->next;
7352 free (mp);
7355 minipool_vector_head = minipool_vector_tail = NULL;
7356 scan = emit_insn_after (gen_consttable_end (), scan);
7357 scan = emit_barrier_after (scan);
7360 /* Return the cost of forcibly inserting a barrier after INSN. */
7361 static int
7362 arm_barrier_cost (rtx insn)
7364 /* Basing the location of the pool on the loop depth is preferable,
7365 but at the moment, the basic block information seems to be
7366 corrupted by this stage of the compilation. */
7367 int base_cost = 50;
7368 rtx next = next_nonnote_insn (insn);
7370 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7371 base_cost -= 20;
7373 switch (GET_CODE (insn))
7375 case CODE_LABEL:
7376 /* It will always be better to place the table before the label, rather
7377 than after it. */
7378 return 50;
7380 case INSN:
7381 case CALL_INSN:
7382 return base_cost;
7384 case JUMP_INSN:
7385 return base_cost - 10;
7387 default:
7388 return base_cost + 10;
7392 /* Find the best place in the insn stream in the range
7393 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7394 Create the barrier by inserting a jump, and add a new fix entry for
7395 it. */
7396 static Mfix *
7397 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7399 HOST_WIDE_INT count = 0;
7400 rtx barrier;
7401 rtx from = fix->insn;
7402 rtx selected = from;
7403 int selected_cost;
7404 HOST_WIDE_INT selected_address;
7405 Mfix * new_fix;
7406 HOST_WIDE_INT max_count = max_address - fix->address;
7407 rtx label = gen_label_rtx ();
7409 selected_cost = arm_barrier_cost (from);
7410 selected_address = fix->address;
7412 while (from && count < max_count)
7414 rtx tmp;
7415 int new_cost;
7417 /* This code shouldn't have been called if there was a natural barrier
7418 within range. */
7419 if (GET_CODE (from) == BARRIER)
7420 abort ();
7422 /* Count the length of this insn. */
7423 count += get_attr_length (from);
7425 /* If there is a jump table, add its length. */
7426 tmp = is_jump_table (from);
7427 if (tmp != NULL)
7429 count += get_jump_table_size (tmp);
7431 /* Jump tables aren't in a basic block, so base the cost on
7432 the dispatch insn. If we select this location, we will
7433 still put the pool after the table. */
7434 new_cost = arm_barrier_cost (from);
7436 if (count < max_count && new_cost <= selected_cost)
7438 selected = tmp;
7439 selected_cost = new_cost;
7440 selected_address = fix->address + count;
7443 /* Continue after the dispatch table. */
7444 from = NEXT_INSN (tmp);
7445 continue;
7448 new_cost = arm_barrier_cost (from);
7450 if (count < max_count && new_cost <= selected_cost)
7452 selected = from;
7453 selected_cost = new_cost;
7454 selected_address = fix->address + count;
7457 from = NEXT_INSN (from);
7460 /* Create a new JUMP_INSN that branches around a barrier. */
7461 from = emit_jump_insn_after (gen_jump (label), selected);
7462 JUMP_LABEL (from) = label;
7463 barrier = emit_barrier_after (from);
7464 emit_label_after (label, barrier);
7466 /* Create a minipool barrier entry for the new barrier. */
7467 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7468 new_fix->insn = barrier;
7469 new_fix->address = selected_address;
7470 new_fix->next = fix->next;
7471 fix->next = new_fix;
7473 return new_fix;
7476 /* Record that there is a natural barrier in the insn stream at
7477 ADDRESS. */
7478 static void
7479 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7481 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7483 fix->insn = insn;
7484 fix->address = address;
7486 fix->next = NULL;
7487 if (minipool_fix_head != NULL)
7488 minipool_fix_tail->next = fix;
7489 else
7490 minipool_fix_head = fix;
7492 minipool_fix_tail = fix;
7495 /* Record INSN, which will need fixing up to load a value from the
7496 minipool. ADDRESS is the offset of the insn from the start of the
7497 function; LOC is a pointer to the part of the insn which requires
7498 fixing; VALUE is the constant that must be loaded, which is of type
7499 MODE. */
7500 static void
7501 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7502 enum machine_mode mode, rtx value)
7504 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7506 #ifdef AOF_ASSEMBLER
7507 /* PIC symbol references need to be converted into offsets into the
7508 based area. */
7509 /* XXX This shouldn't be done here. */
7510 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7511 value = aof_pic_entry (value);
7512 #endif /* AOF_ASSEMBLER */
7514 fix->insn = insn;
7515 fix->address = address;
7516 fix->loc = loc;
7517 fix->mode = mode;
7518 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7519 fix->value = value;
7520 fix->forwards = get_attr_pool_range (insn);
7521 fix->backwards = get_attr_neg_pool_range (insn);
7522 fix->minipool = NULL;
7524 /* If an insn doesn't have a range defined for it, then it isn't
7525 expecting to be reworked by this code. Better to abort now than
7526 to generate duff assembly code. */
7527 if (fix->forwards == 0 && fix->backwards == 0)
7528 abort ();
7530 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7531 So there might be an empty word before the start of the pool.
7532 Hence we reduce the forward range by 4 to allow for this
7533 possibility. */
7534 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7535 fix->forwards -= 4;
7537 if (dump_file)
7539 fprintf (dump_file,
7540 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7541 GET_MODE_NAME (mode),
7542 INSN_UID (insn), (unsigned long) address,
7543 -1 * (long)fix->backwards, (long)fix->forwards);
7544 arm_print_value (dump_file, fix->value);
7545 fprintf (dump_file, "\n");
7548 /* Add it to the chain of fixes. */
7549 fix->next = NULL;
7551 if (minipool_fix_head != NULL)
7552 minipool_fix_tail->next = fix;
7553 else
7554 minipool_fix_head = fix;
7556 minipool_fix_tail = fix;
7559 /* Scan INSN and note any of its operands that need fixing.
7560 If DO_PUSHES is false we do not actually push any of the fixups
7561 needed. The function returns TRUE if any fixups were needed/pushed.
7562 This is used by arm_memory_load_p() which needs to know about loads
7563 of constants that will be converted into minipool loads. */
7564 static bool
7565 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7567 bool result = false;
7568 int opno;
7570 extract_insn (insn);
7572 if (!constrain_operands (1))
7573 fatal_insn_not_found (insn);
7575 if (recog_data.n_alternatives == 0)
7576 return false;
7578 /* Fill in recog_op_alt with information about the constraints of this insn. */
7579 preprocess_constraints ();
7581 for (opno = 0; opno < recog_data.n_operands; opno++)
7583 /* Things we need to fix can only occur in inputs. */
7584 if (recog_data.operand_type[opno] != OP_IN)
7585 continue;
7587 /* If this alternative is a memory reference, then any mention
7588 of constants in this alternative is really to fool reload
7589 into allowing us to accept one there. We need to fix them up
7590 now so that we output the right code. */
7591 if (recog_op_alt[opno][which_alternative].memory_ok)
7593 rtx op = recog_data.operand[opno];
7595 if (CONSTANT_P (op))
7597 if (do_pushes)
7598 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7599 recog_data.operand_mode[opno], op);
7600 result = true;
7602 else if (GET_CODE (op) == MEM
7603 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7604 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7606 if (do_pushes)
7608 rtx cop = avoid_constant_pool_reference (op);
7610 /* Casting the address of something to a mode narrower
7611 than a word can cause avoid_constant_pool_reference()
7612 to return the pool reference itself. That's no good to
7613 us here. Let's just hope that we can use the
7614 constant pool value directly. */
7615 if (op == cop)
7616 cop = get_pool_constant (XEXP (op, 0));
7618 push_minipool_fix (insn, address,
7619 recog_data.operand_loc[opno],
7620 recog_data.operand_mode[opno], cop);
7623 result = true;
7628 return result;
7631 /* GCC puts the pool in the wrong place for ARM, since we can only
7632 load addresses a limited distance around the pc. We do some
7633 special munging to move the constant pool values to the correct
7634 point in the code. */
7635 static void
7636 arm_reorg (void)
7638 rtx insn;
7639 HOST_WIDE_INT address = 0;
7640 Mfix * fix;
7642 minipool_fix_head = minipool_fix_tail = NULL;
7644 /* The first insn must always be a note, or the code below won't
7645 scan it properly. */
7646 insn = get_insns ();
7647 if (GET_CODE (insn) != NOTE)
7648 abort ();
7650 /* Scan all the insns and record the operands that will need fixing. */
7651 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7653 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7654 && (arm_cirrus_insn_p (insn)
7655 || GET_CODE (insn) == JUMP_INSN
7656 || arm_memory_load_p (insn)))
7657 cirrus_reorg (insn);
7659 if (GET_CODE (insn) == BARRIER)
7660 push_minipool_barrier (insn, address);
7661 else if (INSN_P (insn))
7663 rtx table;
7665 note_invalid_constants (insn, address, true);
7666 address += get_attr_length (insn);
7668 /* If the insn is a vector jump, add the size of the table
7669 and skip the table. */
7670 if ((table = is_jump_table (insn)) != NULL)
7672 address += get_jump_table_size (table);
7673 insn = table;
7678 fix = minipool_fix_head;
7680 /* Now scan the fixups and perform the required changes. */
7681 while (fix)
7683 Mfix * ftmp;
7684 Mfix * fdel;
7685 Mfix * last_added_fix;
7686 Mfix * last_barrier = NULL;
7687 Mfix * this_fix;
7689 /* Skip any further barriers before the next fix. */
7690 while (fix && GET_CODE (fix->insn) == BARRIER)
7691 fix = fix->next;
7693 /* No more fixes. */
7694 if (fix == NULL)
7695 break;
7697 last_added_fix = NULL;
7699 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7701 if (GET_CODE (ftmp->insn) == BARRIER)
7703 if (ftmp->address >= minipool_vector_head->max_address)
7704 break;
7706 last_barrier = ftmp;
7708 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7709 break;
7711 last_added_fix = ftmp; /* Keep track of the last fix added. */
7714 /* If we found a barrier, drop back to that; any fixes that we
7715 could have reached but come after the barrier will now go in
7716 the next mini-pool. */
7717 if (last_barrier != NULL)
7719 /* Reduce the refcount for those fixes that won't go into this
7720 pool after all. */
7721 for (fdel = last_barrier->next;
7722 fdel && fdel != ftmp;
7723 fdel = fdel->next)
7725 fdel->minipool->refcount--;
7726 fdel->minipool = NULL;
7729 ftmp = last_barrier;
7731 else
7733 /* ftmp is the first fix that we can't fit into this pool and
7734 there are no natural barriers that we could use. Insert a
7735 new barrier in the code somewhere between the previous
7736 fix and this one, and arrange to jump around it. */
7737 HOST_WIDE_INT max_address;
7739 /* The last item on the list of fixes must be a barrier, so
7740 we can never run off the end of the list of fixes without
7741 last_barrier being set. */
7742 if (ftmp == NULL)
7743 abort ();
7745 max_address = minipool_vector_head->max_address;
7746 /* Check that there isn't another fix that is in range that
7747 we couldn't fit into this pool because the pool was
7748 already too large: we need to put the pool before such an
7749 instruction. */
7750 if (ftmp->address < max_address)
7751 max_address = ftmp->address;
7753 last_barrier = create_fix_barrier (last_added_fix, max_address);
7756 assign_minipool_offsets (last_barrier);
7758 while (ftmp)
7760 if (GET_CODE (ftmp->insn) != BARRIER
7761 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7762 == NULL))
7763 break;
7765 ftmp = ftmp->next;
7768 /* Scan over the fixes we have identified for this pool, fixing them
7769 up and adding the constants to the pool itself. */
7770 for (this_fix = fix; this_fix && ftmp != this_fix;
7771 this_fix = this_fix->next)
7772 if (GET_CODE (this_fix->insn) != BARRIER)
7774 rtx addr
7775 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7776 minipool_vector_label),
7777 this_fix->minipool->offset);
7778 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7781 dump_minipool (last_barrier->insn);
7782 fix = ftmp;
7785 /* From now on we must synthesize any constants that we can't handle
7786 directly. This can happen if the RTL gets split during final
7787 instruction generation. */
7788 after_arm_reorg = 1;
7790 /* Free the minipool memory. */
7791 obstack_free (&minipool_obstack, minipool_startobj);
7794 /* Routines to output assembly language. */
7796 /* If the rtx is one of the valid FPA immediate constants, return the
7797 string form of the number. In this way we can ensure that valid double
7798 constants are generated even when cross-compiling. */
7799 const char *
7800 fp_immediate_constant (rtx x)
7802 REAL_VALUE_TYPE r;
7803 int i;
7805 if (!fp_consts_inited)
7806 init_fp_table ();
7808 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7809 for (i = 0; i < 8; i++)
7810 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7811 return strings_fp[i];
7813 abort ();
7816 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7817 static const char *
7818 fp_const_from_val (REAL_VALUE_TYPE *r)
7820 int i;
7822 if (!fp_consts_inited)
7823 init_fp_table ();
7825 for (i = 0; i < 8; i++)
7826 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7827 return strings_fp[i];
7829 abort ();
7832 /* Output the operands of a LDM/STM instruction to STREAM.
7833 MASK is the ARM register set mask of which only bits 0-15 are important.
7834 REG is the base register, either the frame pointer or the stack pointer;
7835 INSTR is the possibly suffixed load or store instruction. */
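/* For example, assuming INSTR is "ldmfd\t%r!", a MASK with bits 4 and 14
   set and REG == SP_REGNUM would produce "ldmfd sp!, {r4, lr}". */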
7836 static void
7837 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7839 int i;
7840 int not_first = FALSE;
7842 fputc ('\t', stream);
7843 asm_fprintf (stream, instr, reg);
7844 fputs (", {", stream);
7846 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7847 if (mask & (1 << i))
7849 if (not_first)
7850 fprintf (stream, ", ");
7852 asm_fprintf (stream, "%r", i);
7853 not_first = TRUE;
7856 fprintf (stream, "}");
7858 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7859 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7860 Strictly speaking the instruction would be unpredictable only if
7861 we were writing back the base register as well, but since we never
7862 want to generate an LDM type 2 instruction (register bank switching)
7863 which is what you get if the PC is not being loaded, we do not need
7864 to check for writeback. */
7865 if (! TARGET_APCS_32
7866 && ((mask & (1 << PC_REGNUM)) != 0))
7867 fprintf (stream, "^");
7869 fprintf (stream, "\n");
7873 /* Output a FLDMX instruction to STREAM.
7874 BASE is the register containing the address.
7875 REG and COUNT specify the register range.
7876 Extra registers may be added to avoid hardware bugs. */
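/* For example, REG == 8 and COUNT == 2 would normally produce
   "fldmfdx <base>!, {d8, d9}"; when the ARM10 VFPr1 workaround below
   applies, the range is widened to {d8, d9, d10}. */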
7878 static void
7879 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7881 int i;
7883 /* Work around the ARM10 VFPr1 bug. */
7884 if (count == 2 && !arm_arch6)
7886 if (reg == 15)
7887 reg--;
7888 count++;
7891 fputc ('\t', stream);
7892 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7894 for (i = reg; i < reg + count; i++)
7896 if (i > reg)
7897 fputs (", ", stream);
7898 asm_fprintf (stream, "d%d", i);
7900 fputs ("}\n", stream);
7905 /* Output the assembly for a store multiple. */
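/* A sketch of the output: pushing d8 and d9 emits something like
   "fstmfdx sp!, {d8, d9}", with %m0 printing the base register of the
   memory operand and %P1 the first double register. */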
7907 const char *
7908 vfp_output_fstmx (rtx * operands)
7910 char pattern[100];
7911 int p;
7912 int base;
7913 int i;
7915 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7916 p = strlen (pattern);
7918 if (GET_CODE (operands[1]) != REG)
7919 abort ();
7921 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7922 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7924 p += sprintf (&pattern[p], ", d%d", base + i);
7926 strcpy (&pattern[p], "}");
7928 output_asm_insn (pattern, operands);
7929 return "";
7933 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
7934 number of bytes pushed. */
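/* Each register pair occupies 8 bytes, and the FSTMX format allocates
   one extra pad word, which is why this returns count * 8 + 4. */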
7936 static int
7937 vfp_emit_fstmx (int base_reg, int count)
7939 rtx par;
7940 rtx dwarf;
7941 rtx tmp, reg;
7942 int i;
7944 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7945 register pairs are stored by a store multiple insn. We avoid this
7946 by pushing an extra pair. */
7947 if (count == 2 && !arm_arch6)
7949 if (base_reg == LAST_VFP_REGNUM - 3)
7950 base_reg -= 2;
7951 count++;
7954 /* ??? The frame layout is implementation defined. We describe
7955 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7956 We really need some way of representing the whole block so that the
7957 unwinder can figure it out at runtime. */
7958 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7959 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7961 reg = gen_rtx_REG (DFmode, base_reg);
7962 base_reg += 2;
7964 XVECEXP (par, 0, 0)
7965 = gen_rtx_SET (VOIDmode,
7966 gen_rtx_MEM (BLKmode,
7967 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7968 gen_rtx_UNSPEC (BLKmode,
7969 gen_rtvec (1, reg),
7970 UNSPEC_PUSH_MULT));
7972 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7973 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7974 GEN_INT (-(count * 8 + 4))));
7975 RTX_FRAME_RELATED_P (tmp) = 1;
7976 XVECEXP (dwarf, 0, 0) = tmp;
7978 tmp = gen_rtx_SET (VOIDmode,
7979 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7980 reg);
7981 RTX_FRAME_RELATED_P (tmp) = 1;
7982 XVECEXP (dwarf, 0, 1) = tmp;
7984 for (i = 1; i < count; i++)
7986 reg = gen_rtx_REG (DFmode, base_reg);
7987 base_reg += 2;
7988 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7990 tmp = gen_rtx_SET (VOIDmode,
7991 gen_rtx_MEM (DFmode,
7992 gen_rtx_PLUS (SImode,
7993 stack_pointer_rtx,
7994 GEN_INT (i * 8))),
7995 reg);
7996 RTX_FRAME_RELATED_P (tmp) = 1;
7997 XVECEXP (dwarf, 0, i + 1) = tmp;
8000 par = emit_insn (par);
8001 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8002 REG_NOTES (par));
8003 RTX_FRAME_RELATED_P (par) = 1;
8005 return count * 8 + 4;
8009 /* Output a 'call' insn. */
8010 const char *
8011 output_call (rtx *operands)
8013 /* Handle calls to lr using ip (which may be clobbered by the subroutine anyway). */
8015 if (REGNO (operands[0]) == LR_REGNUM)
8017 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8018 output_asm_insn ("mov%?\t%0, %|lr", operands);
8021 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8023 if (TARGET_INTERWORK)
8024 output_asm_insn ("bx%?\t%0", operands);
8025 else
8026 output_asm_insn ("mov%?\t%|pc, %0", operands);
8028 return "";
8031 /* Output a 'call' insn that is a reference in memory. */
8032 const char *
8033 output_call_mem (rtx *operands)
8035 if (TARGET_INTERWORK)
8037 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8038 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8039 output_asm_insn ("bx%?\t%|ip", operands);
8041 else if (regno_use_in (LR_REGNUM, operands[0]))
8043 /* LR is used in the memory address. We load the address in the
8044 first instruction. It's safe to use IP as the target of the
8045 load since the call will kill it anyway. */
8046 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8047 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8048 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8050 else
8052 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8053 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8056 return "";
8060 /* Output a move from ARM registers to an FPA register.
8061 OPERANDS[0] is an FPA register.
8062 OPERANDS[1] is the first of the ARM registers holding the value. */
8063 const char *
8064 output_mov_long_double_fpa_from_arm (rtx *operands)
8066 int arm_reg0 = REGNO (operands[1]);
8067 rtx ops[3];
8069 if (arm_reg0 == IP_REGNUM)
8070 abort ();
8072 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8073 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8074 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8076 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8077 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8079 return "";
8082 /* Output a move from an FPA register to ARM registers.
8083 OPERANDS[0] is the first of the ARM registers holding the value.
8084 OPERANDS[1] is an FPA register. */
8085 const char *
8086 output_mov_long_double_arm_from_fpa (rtx *operands)
8088 int arm_reg0 = REGNO (operands[0]);
8089 rtx ops[3];
8091 if (arm_reg0 == IP_REGNUM)
8092 abort ();
8094 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8095 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8096 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8098 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8099 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8100 return "";
8103 /* Output a move of a long double from ARM registers to ARM registers.
8104 OPERANDS[0] is the destination.
8105 OPERANDS[1] is the source. */
8106 const char *
8107 output_mov_long_double_arm_from_arm (rtx *operands)
8109 /* We have to be careful here because the two might overlap. */
8110 int dest_start = REGNO (operands[0]);
8111 int src_start = REGNO (operands[1]);
8112 rtx ops[2];
8113 int i;
8115 if (dest_start < src_start)
8117 for (i = 0; i < 3; i++)
8119 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8120 ops[1] = gen_rtx_REG (SImode, src_start + i);
8121 output_asm_insn ("mov%?\t%0, %1", ops);
8124 else
8126 for (i = 2; i >= 0; i--)
8128 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8129 ops[1] = gen_rtx_REG (SImode, src_start + i);
8130 output_asm_insn ("mov%?\t%0, %1", ops);
8134 return "";
8138 /* Output a move from ARM registers to an FPA register.
8139 OPERANDS[0] is an FPA register.
8140 OPERANDS[1] is the first register of an ARM register pair. */
8141 const char *
8142 output_mov_double_fpa_from_arm (rtx *operands)
8144 int arm_reg0 = REGNO (operands[1]);
8145 rtx ops[2];
8147 if (arm_reg0 == IP_REGNUM)
8148 abort ();
8150 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8151 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8152 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8153 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8154 return "";
8157 /* Output a move from an fpa register to arm registers.
8158 OPERANDS[0] is the first register of an arm register pair.
8159 OPERANDS[1] is an fpa register. */
8160 const char *
8161 output_mov_double_arm_from_fpa (rtx *operands)
8163 int arm_reg0 = REGNO (operands[0]);
8164 rtx ops[2];
8166 if (arm_reg0 == IP_REGNUM)
8167 abort ();
8169 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8170 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8171 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8172 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8173 return "";
8176 /* Output a move between double words.
8177 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8178 or MEM<-REG and all MEMs must be offsettable addresses. */
8179 const char *
8180 output_move_double (rtx *operands)
8182 enum rtx_code code0 = GET_CODE (operands[0]);
8183 enum rtx_code code1 = GET_CODE (operands[1]);
8184 rtx otherops[3];
8186 if (code0 == REG)
8188 int reg0 = REGNO (operands[0]);
8190 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8192 if (code1 == REG)
8194 int reg1 = REGNO (operands[1]);
8195 if (reg1 == IP_REGNUM)
8196 abort ();
8198 /* Ensure the second source is not overwritten. */
8199 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8200 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8201 else
8202 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8204 else if (code1 == CONST_VECTOR)
8206 HOST_WIDE_INT hint = 0;
8208 switch (GET_MODE (operands[1]))
8210 case V2SImode:
8211 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8212 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8213 break;
8215 case V4HImode:
8216 if (BYTES_BIG_ENDIAN)
8218 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8219 hint <<= 16;
8220 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8222 else
8224 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8225 hint <<= 16;
8226 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8229 otherops[1] = GEN_INT (hint);
8230 hint = 0;
8232 if (BYTES_BIG_ENDIAN)
8234 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8235 hint <<= 16;
8236 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8238 else
8240 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8241 hint <<= 16;
8242 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8245 operands[1] = GEN_INT (hint);
8246 break;
8248 case V8QImode:
8249 if (BYTES_BIG_ENDIAN)
8251 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8252 hint <<= 8;
8253 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8254 hint <<= 8;
8255 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8256 hint <<= 8;
8257 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8259 else
8261 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8262 hint <<= 8;
8263 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8264 hint <<= 8;
8265 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8266 hint <<= 8;
8267 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8270 otherops[1] = GEN_INT (hint);
8271 hint = 0;
8273 if (BYTES_BIG_ENDIAN)
8275 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8276 hint <<= 8;
8277 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8278 hint <<= 8;
8279 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8280 hint <<= 8;
8281 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8283 else
8285 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8286 hint <<= 8;
8287 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8288 hint <<= 8;
8289 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8290 hint <<= 8;
8291 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8294 operands[1] = GEN_INT (hint);
8295 break;
8297 default:
8298 abort ();
8300 output_mov_immediate (operands);
8301 output_mov_immediate (otherops);
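/* Packing sketch (illustrative): on a little-endian target the
   V4HImode constant {0x1111, 0x2222, 0x3333, 0x4444} is converted to
   the SImode immediates 0x22221111 (low word, operands[1]) and
   0x44443333 (high word, otherops[1]) before the two
   output_mov_immediate calls above.  */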
8303 else if (code1 == CONST_DOUBLE)
8305 if (GET_MODE (operands[1]) == DFmode)
8307 REAL_VALUE_TYPE r;
8308 long l[2];
8310 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8311 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8312 otherops[1] = GEN_INT (l[1]);
8313 operands[1] = GEN_INT (l[0]);
8315 else if (GET_MODE (operands[1]) != VOIDmode)
8316 abort ();
8317 else if (WORDS_BIG_ENDIAN)
8319 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8320 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8322 else
8324 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8325 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8328 output_mov_immediate (operands);
8329 output_mov_immediate (otherops);
8331 else if (code1 == CONST_INT)
8333 #if HOST_BITS_PER_WIDE_INT > 32
8334 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8335 what the upper word is. */
8336 if (WORDS_BIG_ENDIAN)
8338 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8339 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8341 else
8343 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8344 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8346 #else
8347 /* Sign extend the intval into the high-order word. */
8348 if (WORDS_BIG_ENDIAN)
8350 otherops[1] = operands[1];
8351 operands[1] = (INTVAL (operands[1]) < 0
8352 ? constm1_rtx : const0_rtx);
8354 else
8355 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8356 #endif
8357 output_mov_immediate (otherops);
8358 output_mov_immediate (operands);
8360 else if (code1 == MEM)
8362 switch (GET_CODE (XEXP (operands[1], 0)))
8364 case REG:
8365 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8366 break;
8368 case PRE_INC:
8369 abort (); /* Should never happen now. */
8370 break;
8372 case PRE_DEC:
8373 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8374 break;
8376 case POST_INC:
8377 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8378 break;
8380 case POST_DEC:
8381 abort (); /* Should never happen now. */
8382 break;
8384 case LABEL_REF:
8385 case CONST:
8386 output_asm_insn ("adr%?\t%0, %1", operands);
8387 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8388 break;
8390 default:
8391 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8392 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8394 otherops[0] = operands[0];
8395 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8396 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8398 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8400 if (GET_CODE (otherops[2]) == CONST_INT)
8402 switch ((int) INTVAL (otherops[2]))
8404 case -8:
8405 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8406 return "";
8407 case -4:
8408 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8409 return "";
8410 case 4:
8411 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8412 return "";
8415 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8416 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8417 else
8418 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8420 else
8421 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8423 else
8424 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8426 return "ldm%?ia\t%0, %M0";
8428 else
8430 otherops[1] = adjust_address (operands[1], SImode, 4);
8431 /* Take care of overlapping base/data reg. */
8432 if (reg_mentioned_p (operands[0], operands[1]))
8434 output_asm_insn ("ldr%?\t%0, %1", otherops);
8435 output_asm_insn ("ldr%?\t%0, %1", operands);
8437 else
8439 output_asm_insn ("ldr%?\t%0, %1", operands);
8440 output_asm_insn ("ldr%?\t%0, %1", otherops);
8445 else
8446 abort (); /* Constraints should prevent this. */
8448 else if (code0 == MEM && code1 == REG)
8450 if (REGNO (operands[1]) == IP_REGNUM)
8451 abort ();
8453 switch (GET_CODE (XEXP (operands[0], 0)))
8455 case REG:
8456 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8457 break;
8459 case PRE_INC:
8460 abort (); /* Should never happen now. */
8461 break;
8463 case PRE_DEC:
8464 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8465 break;
8467 case POST_INC:
8468 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8469 break;
8471 case POST_DEC:
8472 abort (); /* Should never happen now. */
8473 break;
8475 case PLUS:
8476 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
8478 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8480 case -8:
8481 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8482 return "";
8484 case -4:
8485 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8486 return "";
8488 case 4:
8489 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8490 return "";
8493 /* Fall through */
8495 default:
8496 otherops[0] = adjust_address (operands[0], SImode, 4);
8497 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8498 output_asm_insn ("str%?\t%1, %0", operands);
8499 output_asm_insn ("str%?\t%1, %0", otherops);
8502 else
8503 /* Constraints should prevent this. */
8504 abort ();
8506 return "";
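/* A few illustrative expansions of the above (hypothetical registers):

        DImode load,  address (reg r2):          ldmia   r2, {r0, r1}
        DImode load,  address (plus r2 4):       ldmib   r2, {r0, r1}
        DImode store, address (plus r2 -8):      stmdb   r2, {r0, r1}

   For other offsets the base is first formed with add/sub, and when a
   loaded value overlaps its own base register the two ldr instructions
   are ordered so the base is used before it is overwritten.  */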
8510 /* Output an arbitrary MOV reg, #n.
8511 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8512 const char *
8513 output_mov_immediate (rtx *operands)
8515 HOST_WIDE_INT n = INTVAL (operands[1]);
8517 /* Try to use one MOV. */
8518 if (const_ok_for_arm (n))
8519 output_asm_insn ("mov%?\t%0, %1", operands);
8521 /* Try to use one MVN. */
8522 else if (const_ok_for_arm (~n))
8524 operands[1] = GEN_INT (~n);
8525 output_asm_insn ("mvn%?\t%0, %1", operands);
8527 else
8529 int n_ones = 0;
8530 int i;
8532 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8533 for (i = 0; i < 32; i++)
8534 if (n & 1 << i)
8535 n_ones++;
8537 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8538 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8539 else
8540 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8543 return "";
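/* Illustration: valid single-instruction immediates are an 8-bit value
   rotated right by an even amount, so 0x000000FF and 0xFF000000 each
   take one MOV, 0xFFFFFF00 becomes a single MVN of 0x000000FF, and a
   constant such as 0x00FF00FF is handed to output_multi_immediate.  */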
8546 /* Output an ADD r, s, #n where n may be too big for one instruction.
8547 If adding zero to one register, output nothing. */
8548 const char *
8549 output_add_immediate (rtx *operands)
8551 HOST_WIDE_INT n = INTVAL (operands[2]);
8553 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8555 if (n < 0)
8556 output_multi_immediate (operands,
8557 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8558 -n);
8559 else
8560 output_multi_immediate (operands,
8561 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8562 n);
8565 return "";
8568 /* Output a multiple immediate operation.
8569 OPERANDS is the vector of operands referred to in the output patterns.
8570 INSTR1 is the output pattern to use for the first constant.
8571 INSTR2 is the output pattern to use for subsequent constants.
8572 IMMED_OP is the index of the constant slot in OPERANDS.
8573 N is the constant value. */
8574 static const char *
8575 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8576 int immed_op, HOST_WIDE_INT n)
8578 #if HOST_BITS_PER_WIDE_INT > 32
8579 n &= 0xffffffff;
8580 #endif
8582 if (n == 0)
8584 /* Quick and easy output. */
8585 operands[immed_op] = const0_rtx;
8586 output_asm_insn (instr1, operands);
8588 else
8590 int i;
8591 const char * instr = instr1;
8593 /* Note that n is never zero here (which would give no output). */
8594 for (i = 0; i < 32; i += 2)
8596 if (n & (3 << i))
8598 operands[immed_op] = GEN_INT (n & (255 << i));
8599 output_asm_insn (instr, operands);
8600 instr = instr2;
8601 i += 6;
8606 return "";
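/* Trace of the chunk walk (illustrative): for N = 0x00FF00FF the loop
   fires at i = 0 and i = 16, emitting

        mov     r0, #255                @ N & (255 << 0)
        orr     r0, r0, #16711680       @ N & (255 << 16)

   Every chunk is an 8-bit field at an even bit position, so each
   operand is a valid ARM immediate.  */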
8609 /* Return the appropriate ARM instruction for the operation code.
8610 The returned result should not be overwritten. OP is the rtx of the
8611 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8612 was shifted. */
8613 const char *
8614 arithmetic_instr (rtx op, int shift_first_arg)
8616 switch (GET_CODE (op))
8618 case PLUS:
8619 return "add";
8621 case MINUS:
8622 return shift_first_arg ? "rsb" : "sub";
8624 case IOR:
8625 return "orr";
8627 case XOR:
8628 return "eor";
8630 case AND:
8631 return "and";
8633 default:
8634 abort ();
8638 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8639 for the operation code. The returned result should not be overwritten.
8640 OP is the rtx code of the shift.
8641 On exit, *AMOUNTP will be -1 if the shift is by a register; otherwise
8642 it will hold the constant shift amount. */
8643 static const char *
8644 shift_op (rtx op, HOST_WIDE_INT *amountp)
8646 const char * mnem;
8647 enum rtx_code code = GET_CODE (op);
8649 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8650 *amountp = -1;
8651 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8652 *amountp = INTVAL (XEXP (op, 1));
8653 else
8654 abort ();
8656 switch (code)
8658 case ASHIFT:
8659 mnem = "asl";
8660 break;
8662 case ASHIFTRT:
8663 mnem = "asr";
8664 break;
8666 case LSHIFTRT:
8667 mnem = "lsr";
8668 break;
8670 case ROTATERT:
8671 mnem = "ror";
8672 break;
8674 case MULT:
8675 /* We never have to worry about the amount being other than a
8676 power of 2, since this case can never be reloaded from a reg. */
8677 if (*amountp != -1)
8678 *amountp = int_log2 (*amountp);
8679 else
8680 abort ();
8681 return "asl";
8683 default:
8684 abort ();
8687 if (*amountp != -1)
8689 /* This is not 100% correct, but follows from the desire to merge
8690 multiplication by a power of 2 with the recognizer for a
8691 shift. >=32 is not a valid shift for "asl", so we must try and
8692 output a shift that produces the correct arithmetical result.
8693 Using lsr #32 is identical except for the fact that the carry bit
8694 is not set correctly if we set the flags; but we never use the
8695 carry bit from such an operation, so we can ignore that. */
8696 if (code == ROTATERT)
8697 /* Rotate is just modulo 32. */
8698 *amountp &= 31;
8699 else if (*amountp != (*amountp & 31))
8701 if (code == ASHIFT)
8702 mnem = "lsr";
8703 *amountp = 32;
8706 /* Shifts of 0 are no-ops. */
8707 if (*amountp == 0)
8708 return NULL;
8711 return mnem;
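/* Examples (illustrative): (ashift x 3) gives "asl" with amount 3, and
   (mult x 8) is folded to the same; (ashift x 33) is out of range and
   is rewritten as "lsr" with amount 32, which produces the same
   all-zero result; (rotatert x 33) is reduced modulo 32 to "ror" 1.  */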
8714 /* Obtain the shift count from the power of two POWER. */
8716 static HOST_WIDE_INT
8717 int_log2 (HOST_WIDE_INT power)
8719 HOST_WIDE_INT shift = 0;
8721 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8723 if (shift > 31)
8724 abort ();
8725 shift++;
8728 return shift;
8731 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8732 /bin/as is horribly restrictive. */
8733 #define MAX_ASCII_LEN 51
8735 void
8736 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8738 int i;
8739 int len_so_far = 0;
8741 fputs ("\t.ascii\t\"", stream);
8743 for (i = 0; i < len; i++)
8745 int c = p[i];
8747 if (len_so_far >= MAX_ASCII_LEN)
8749 fputs ("\"\n\t.ascii\t\"", stream);
8750 len_so_far = 0;
8753 switch (c)
8755 case TARGET_TAB:
8756 fputs ("\\t", stream);
8757 len_so_far += 2;
8758 break;
8760 case TARGET_FF:
8761 fputs ("\\f", stream);
8762 len_so_far += 2;
8763 break;
8765 case TARGET_BS:
8766 fputs ("\\b", stream);
8767 len_so_far += 2;
8768 break;
8770 case TARGET_CR:
8771 fputs ("\\r", stream);
8772 len_so_far += 2;
8773 break;
8775 case TARGET_NEWLINE:
8776 fputs ("\\n", stream);
8777 c = p [i + 1];
8778 if ((c >= ' ' && c <= '~')
8779 || c == TARGET_TAB)
8780 /* This is a good place for a line break. */
8781 len_so_far = MAX_ASCII_LEN;
8782 else
8783 len_so_far += 2;
8784 break;
8786 case '\"':
8787 case '\\':
8788 putc ('\\', stream);
8789 len_so_far++;
8790 /* Drop through. */
8792 default:
8793 if (c >= ' ' && c <= '~')
8795 putc (c, stream);
8796 len_so_far++;
8798 else
8800 fprintf (stream, "\\%03o", c);
8801 len_so_far += 4;
8803 break;
8807 fputs ("\"\n", stream);
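/* For illustration, the bytes "a<TAB>b" come out as

        .ascii  "a\tb"

   a non-printable byte such as 0x01 comes out as the octal escape
   \001, and a long string is split across several .ascii directives
   once MAX_ASCII_LEN characters have been written on one line.  */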
8810 /* Compute the register save mask for registers 0 through 12
8811 inclusive. This code is used by arm_compute_save_reg_mask. */
8812 static unsigned long
8813 arm_compute_save_reg0_reg12_mask (void)
8815 unsigned long func_type = arm_current_func_type ();
8816 unsigned int save_reg_mask = 0;
8817 unsigned int reg;
8819 if (IS_INTERRUPT (func_type))
8821 unsigned int max_reg;
8822 /* Interrupt functions must not corrupt any registers,
8823 even call clobbered ones. If this is a leaf function
8824 we can just examine the registers used by the RTL, but
8825 otherwise we have to assume that whatever function is
8826 called might clobber anything, and so we have to save
8827 all the call-clobbered registers as well. */
8828 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8829 /* FIQ handlers have registers r8 - r12 banked, so
8830 we only need to check r0 - r7. Normal ISRs only
8831 bank r14 and r15, so we must check up to r12.
8832 r13 is the stack pointer which is always preserved,
8833 so we do not need to consider it here. */
8834 max_reg = 7;
8835 else
8836 max_reg = 12;
8838 for (reg = 0; reg <= max_reg; reg++)
8839 if (regs_ever_live[reg]
8840 || (! current_function_is_leaf && call_used_regs [reg]))
8841 save_reg_mask |= (1 << reg);
8843 else
8845 /* In the normal case we only need to save those registers
8846 which are call saved and which are used by this function. */
8847 for (reg = 0; reg <= 10; reg++)
8848 if (regs_ever_live[reg] && ! call_used_regs [reg])
8849 save_reg_mask |= (1 << reg);
8851 /* Handle the frame pointer as a special case. */
8852 if (! TARGET_APCS_FRAME
8853 && ! frame_pointer_needed
8854 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8855 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8856 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8858 /* If we aren't loading the PIC register,
8859 don't stack it even though it may be live. */
8860 if (flag_pic
8861 && ! TARGET_SINGLE_PIC_BASE
8862 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8863 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8866 return save_reg_mask;
8869 /* Compute a bit mask of which registers need to be
8870 saved on the stack for the current function. */
8872 static unsigned long
8873 arm_compute_save_reg_mask (void)
8875 unsigned int save_reg_mask = 0;
8876 unsigned long func_type = arm_current_func_type ();
8878 if (IS_NAKED (func_type))
8879 /* This should never really happen. */
8880 return 0;
8882 /* If we are creating a stack frame, then we must save the frame pointer,
8883 IP (which will hold the old stack pointer), LR and the PC. */
8884 if (frame_pointer_needed)
8885 save_reg_mask |=
8886 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8887 | (1 << IP_REGNUM)
8888 | (1 << LR_REGNUM)
8889 | (1 << PC_REGNUM);
8891 /* Volatile functions do not return, so there
8892 is no need to save any other registers. */
8893 if (IS_VOLATILE (func_type))
8894 return save_reg_mask;
8896 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8898 /* Decide if we need to save the link register.
8899 Interrupt routines have their own banked link register,
8900 so they never need to save it.
8901 Otherwise if we do not use the link register we do not need to save
8902 it. If we are pushing other registers onto the stack however, we
8903 can save an instruction in the epilogue by pushing the link register
8904 now and then popping it back into the PC. This incurs extra memory
8905 accesses though, so we only do it when optimizing for size, and only
8906 if we know that we will not need a fancy return sequence. */
8907 if (regs_ever_live [LR_REGNUM]
8908 || (save_reg_mask
8909 && optimize_size
8910 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8911 save_reg_mask |= 1 << LR_REGNUM;
8913 if (cfun->machine->lr_save_eliminated)
8914 save_reg_mask &= ~ (1 << LR_REGNUM);
8916 if (TARGET_REALLY_IWMMXT
8917 && ((bit_count (save_reg_mask)
8918 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8920 unsigned int reg;
8922 /* The total number of registers that are going to be pushed
8923 onto the stack is odd. We need to ensure that the stack
8924 is 64-bit aligned before we start to save iWMMXt registers,
8925 and also before we start to create locals. (A local variable
8926 might be a double or long long which we will load/store using
8927 an iWMMXt instruction). Therefore we need to push another
8928 ARM register, so that the stack will be 64-bit aligned. We
8929 try to avoid using the arg registers (r0 - r3) as they might be
8930 used to pass values in a tail call. */
8931 for (reg = 4; reg <= 12; reg++)
8932 if ((save_reg_mask & (1 << reg)) == 0)
8933 break;
8935 if (reg <= 12)
8936 save_reg_mask |= (1 << reg);
8937 else
8939 cfun->machine->sibcall_blocked = 1;
8940 save_reg_mask |= (1 << 3);
8944 return save_reg_mask;
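/* Alignment sketch: if the mask so far were {r4, r5, lr} (an odd count)
   on an iWMMXt target, r6 -- the first free register above the argument
   registers -- would be added, keeping the stack 64-bit aligned before
   any iWMMXt registers or locals are placed on it.  */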
8948 /* Return the number of bytes required to save VFP registers. */
8949 static int
8950 arm_get_vfp_saved_size (void)
8952 unsigned int regno;
8953 int count;
8954 int saved;
8956 saved = 0;
8957 /* Space for saved VFP registers. */
8958 if (TARGET_HARD_FLOAT && TARGET_VFP)
8960 count = 0;
8961 for (regno = FIRST_VFP_REGNUM;
8962 regno < LAST_VFP_REGNUM;
8963 regno += 2)
8965 if ((!regs_ever_live[regno] || call_used_regs[regno])
8966 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8968 if (count > 0)
8970 /* Workaround ARM10 VFPr1 bug. */
8971 if (count == 2 && !arm_arch6)
8972 count++;
8973 saved += count * 8 + 4;
8975 count = 0;
8977 else
8978 count++;
8980 if (count > 0)
8982 if (count == 2 && !arm_arch6)
8983 count++;
8984 saved += count * 8 + 4;
8987 return saved;
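/* Size arithmetic (illustrative): a contiguous block of three saved
   double-precision registers costs 3 * 8 + 4 = 28 bytes -- 8 bytes per
   register plus one word for the FSTMX/FLDMX format word -- and on
   pre-ARMv6 cores a block of exactly two is padded to three to avoid
   the ARM10 VFPr1 bug mentioned above.  */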
8991 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8992 everything bar the final return instruction. */
8993 const char *
8994 output_return_instruction (rtx operand, int really_return, int reverse)
8996 char conditional[10];
8997 char instr[100];
8998 int reg;
8999 unsigned long live_regs_mask;
9000 unsigned long func_type;
9001 arm_stack_offsets *offsets;
9003 func_type = arm_current_func_type ();
9005 if (IS_NAKED (func_type))
9006 return "";
9008 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9010 /* If this function was declared non-returning, and we have
9011 found a tail call, then we have to trust that the called
9012 function won't return. */
9013 if (really_return)
9015 rtx ops[2];
9017 /* Otherwise, trap an attempted return by aborting. */
9018 ops[0] = operand;
9019 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9020 : "abort");
9021 assemble_external_libcall (ops[1]);
9022 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9025 return "";
9028 if (current_function_calls_alloca && !really_return)
9029 abort ();
9031 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9033 return_used_this_function = 1;
9035 live_regs_mask = arm_compute_save_reg_mask ();
9037 if (live_regs_mask)
9039 const char * return_reg;
9041 /* If we do not have any special requirements for function exit
9042 (eg interworking, or ISR) then we can load the return address
9043 directly into the PC. Otherwise we must load it into LR. */
9044 if (really_return
9045 && ! TARGET_INTERWORK)
9046 return_reg = reg_names[PC_REGNUM];
9047 else
9048 return_reg = reg_names[LR_REGNUM];
9050 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9052 /* There are three possible reasons for the IP register
9053 being saved. 1) a stack frame was created, in which case
9054 IP contains the old stack pointer, or 2) an ISR routine
9055 corrupted it, or 3) it was saved to align the stack on
9056 iWMMXt. In case 1, restore IP into SP, otherwise just
9057 restore IP. */
9058 if (frame_pointer_needed)
9060 live_regs_mask &= ~ (1 << IP_REGNUM);
9061 live_regs_mask |= (1 << SP_REGNUM);
9063 else
9065 if (! IS_INTERRUPT (func_type)
9066 && ! TARGET_REALLY_IWMMXT)
9067 abort ();
9071 /* On some ARM architectures it is faster to use LDR rather than
9072 LDM to load a single register. On other architectures, the
9073 cost is the same. In 26 bit mode, or for exception handlers,
9074 we have to use LDM to load the PC so that the CPSR is also
9075 restored. */
9076 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9078 if (live_regs_mask == (unsigned int)(1 << reg))
9079 break;
9081 if (reg <= LAST_ARM_REGNUM
9082 && (reg != LR_REGNUM
9083 || ! really_return
9084 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
9086 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9087 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9089 else
9091 char *p;
9092 int first = 1;
9094 /* Generate the load multiple instruction to restore the
9095 registers. Note we can get here, even if
9096 frame_pointer_needed is true, but only if sp already
9097 points to the base of the saved core registers. */
9098 if (live_regs_mask & (1 << SP_REGNUM))
9100 unsigned HOST_WIDE_INT stack_adjust;
9102 offsets = arm_get_frame_offsets ();
9103 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9104 if (stack_adjust != 0 && stack_adjust != 4)
9105 abort ();
9107 if (stack_adjust && arm_arch5)
9108 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9109 else
9111 /* If we can't use ldmib (SA110 bug), then try to pop r3
9112 instead. */
9113 if (stack_adjust)
9114 live_regs_mask |= 1 << 3;
9115 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9118 else
9119 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9121 p = instr + strlen (instr);
9123 for (reg = 0; reg <= SP_REGNUM; reg++)
9124 if (live_regs_mask & (1 << reg))
9126 int l = strlen (reg_names[reg]);
9128 if (first)
9129 first = 0;
9130 else
9132 memcpy (p, ", ", 2);
9133 p += 2;
9136 memcpy (p, "%|", 2);
9137 memcpy (p + 2, reg_names[reg], l);
9138 p += l + 2;
9141 if (live_regs_mask & (1 << LR_REGNUM))
9143 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9144 /* Decide if we need to add the ^ symbol to the end of the
9145 register list. This causes the saved condition codes
9146 register to be copied into the current condition codes
9147 register. We do the copy if we are conforming to the 32-bit
9148 ABI and this is an interrupt function, or if we are
9149 conforming to the 26-bit ABI. There is a special case for
9150 the 26-bit ABI however, which is if we are writing back the
9151 stack pointer but not loading the PC. In this case adding
9152 the ^ symbol would create a type 2 LDM instruction, where
9153 writeback is UNPREDICTABLE. We are safe in leaving the ^
9154 character off in this case however, since the actual return
9155 instruction will be a MOVS which will restore the CPSR. */
9156 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
9157 || (! TARGET_APCS_32 && really_return))
9158 strcat (p, "^");
9160 else
9161 strcpy (p, "}");
9164 output_asm_insn (instr, & operand);
9166 /* See if we need to generate an extra instruction to
9167 perform the actual function return. */
9168 if (really_return
9169 && func_type != ARM_FT_INTERWORKED
9170 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9172 /* The return has already been handled
9173 by loading the LR into the PC. */
9174 really_return = 0;
9178 if (really_return)
9180 switch ((int) ARM_FUNC_TYPE (func_type))
9182 case ARM_FT_ISR:
9183 case ARM_FT_FIQ:
9184 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9185 break;
9187 case ARM_FT_INTERWORKED:
9188 sprintf (instr, "bx%s\t%%|lr", conditional);
9189 break;
9191 case ARM_FT_EXCEPTION:
9192 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9193 break;
9195 default:
9196 /* ARMv5 implementations always provide BX, so interworking
9197 is the default unless APCS-26 is in use. */
9198 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
9199 sprintf (instr, "bx%s\t%%|lr", conditional);
9200 else
9201 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
9202 conditional, TARGET_APCS_32 ? "" : "s");
9203 break;
9206 output_asm_insn (instr, & operand);
9209 return "";
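/* Epilogue sketches (illustrative): an APCS-32 function that pushed
   {r4, lr} returns with the single instruction

        ldmfd   sp!, {r4, pc}

   while an interworked return is "bx lr" and an ISR return is
   "subs pc, lr, #4", the S suffix restoring CPSR from SPSR.  */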
9212 /* Write the function name into the code section, directly preceding
9213 the function prologue.
9215 Code will be output similar to this:
9216 t0
9217 .ascii "arm_poke_function_name", 0
9218 .align
9219 t1
9220 .word 0xff000000 + (t1 - t0)
9221 arm_poke_function_name
9222 mov ip, sp
9223 stmfd sp!, {fp, ip, lr, pc}
9224 sub fp, ip, #4
9226 When performing a stack backtrace, code can inspect the value
9227 of 'pc' stored at 'fp' + 0. If the trace function then looks
9228 at location pc - 12 and the top 8 bits are set, then we know
9229 that there is a function name embedded immediately preceding this
9230 location, whose length is ((pc[-3]) & ~0xff000000).
9232 We assume that pc is declared as a pointer to an unsigned long.
9234 It is of no benefit to output the function name if we are assembling
9235 a leaf function. These function types will not contain a stack
9236 backtrace structure, therefore it is not possible to determine the
9237 function name. */
9238 void
9239 arm_poke_function_name (FILE *stream, const char *name)
9241 unsigned long alignlength;
9242 unsigned long length;
9243 rtx x;
9245 length = strlen (name) + 1;
9246 alignlength = ROUND_UP_WORD (length);
9248 ASM_OUTPUT_ASCII (stream, name, length);
9249 ASM_OUTPUT_ALIGN (stream, 2);
9250 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9251 assemble_aligned_integer (UNITS_PER_WORD, x);
9254 /* Place some comments into the assembler stream
9255 describing the current function. */
9256 static void
9257 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9259 unsigned long func_type;
9261 if (!TARGET_ARM)
9263 thumb_output_function_prologue (f, frame_size);
9264 return;
9267 /* Sanity check. */
9268 if (arm_ccfsm_state || arm_target_insn)
9269 abort ();
9271 func_type = arm_current_func_type ();
9273 switch ((int) ARM_FUNC_TYPE (func_type))
9275 default:
9276 case ARM_FT_NORMAL:
9277 break;
9278 case ARM_FT_INTERWORKED:
9279 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9280 break;
9281 case ARM_FT_EXCEPTION_HANDLER:
9282 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
9283 break;
9284 case ARM_FT_ISR:
9285 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9286 break;
9287 case ARM_FT_FIQ:
9288 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9289 break;
9290 case ARM_FT_EXCEPTION:
9291 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9292 break;
9295 if (IS_NAKED (func_type))
9296 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9298 if (IS_VOLATILE (func_type))
9299 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9301 if (IS_NESTED (func_type))
9302 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9304 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9305 current_function_args_size,
9306 current_function_pretend_args_size, frame_size);
9308 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9309 frame_pointer_needed,
9310 cfun->machine->uses_anonymous_args);
9312 if (cfun->machine->lr_save_eliminated)
9313 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9315 #ifdef AOF_ASSEMBLER
9316 if (flag_pic)
9317 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9318 #endif
9320 return_used_this_function = 0;
9323 const char *
9324 arm_output_epilogue (rtx sibling)
9326 int reg;
9327 unsigned long saved_regs_mask;
9328 unsigned long func_type;
9329 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9330 frame that is $fp + 4 for a non-variadic function. */
9331 int floats_offset = 0;
9332 rtx operands[3];
9333 FILE * f = asm_out_file;
9334 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9335 unsigned int lrm_count = 0;
9336 int really_return = (sibling == NULL);
9337 int start_reg;
9338 arm_stack_offsets *offsets;
9340 /* If we have already generated the return instruction
9341 then it is futile to generate anything else. */
9342 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9343 return "";
9345 func_type = arm_current_func_type ();
9347 if (IS_NAKED (func_type))
9348 /* Naked functions don't have epilogues. */
9349 return "";
9351 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9353 rtx op;
9355 /* A volatile function should never return. Call abort. */
9356 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9357 assemble_external_libcall (op);
9358 output_asm_insn ("bl\t%a0", &op);
9360 return "";
9363 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
9364 && ! really_return)
9365 /* If we are throwing an exception, then we really must
9366 be doing a return, so we can't tail-call. */
9367 abort ();
9369 offsets = arm_get_frame_offsets ();
9370 saved_regs_mask = arm_compute_save_reg_mask ();
9372 if (TARGET_IWMMXT)
9373 lrm_count = bit_count (saved_regs_mask);
9375 floats_offset = offsets->saved_args;
9376 /* Compute how far away the floats will be. */
9377 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9378 if (saved_regs_mask & (1 << reg))
9379 floats_offset += 4;
9381 if (frame_pointer_needed)
9383 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9384 int vfp_offset = offsets->frame;
9386 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9388 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9389 if (regs_ever_live[reg] && !call_used_regs[reg])
9391 floats_offset += 12;
9392 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9393 reg, FP_REGNUM, floats_offset - vfp_offset);
9396 else
9398 start_reg = LAST_FPA_REGNUM;
9400 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9402 if (regs_ever_live[reg] && !call_used_regs[reg])
9404 floats_offset += 12;
9406 /* We can't unstack more than four registers at once. */
9407 if (start_reg - reg == 3)
9409 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9410 reg, FP_REGNUM, floats_offset - vfp_offset);
9411 start_reg = reg - 1;
9414 else
9416 if (reg != start_reg)
9417 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9418 reg + 1, start_reg - reg,
9419 FP_REGNUM, floats_offset - vfp_offset);
9420 start_reg = reg - 1;
9424 /* Just in case the last register checked also needs unstacking. */
9425 if (reg != start_reg)
9426 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9427 reg + 1, start_reg - reg,
9428 FP_REGNUM, floats_offset - vfp_offset);
9431 if (TARGET_HARD_FLOAT && TARGET_VFP)
9433 int saved_size;
9435 /* The fldmx insn does not have base+offset addressing modes,
9436 so we use IP to hold the address. */
9437 saved_size = arm_get_vfp_saved_size ();
9439 if (saved_size > 0)
9441 floats_offset += saved_size;
9442 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9443 FP_REGNUM, floats_offset - vfp_offset);
9445 start_reg = FIRST_VFP_REGNUM;
9446 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9448 if ((!regs_ever_live[reg] || call_used_regs[reg])
9449 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9451 if (start_reg != reg)
9452 arm_output_fldmx (f, IP_REGNUM,
9453 (start_reg - FIRST_VFP_REGNUM) / 2,
9454 (reg - start_reg) / 2);
9455 start_reg = reg + 2;
9458 if (start_reg != reg)
9459 arm_output_fldmx (f, IP_REGNUM,
9460 (start_reg - FIRST_VFP_REGNUM) / 2,
9461 (reg - start_reg) / 2);
9464 if (TARGET_IWMMXT)
9466 /* The frame pointer is guaranteed to be non-double-word aligned.
9467 This is because it is set to (old_stack_pointer - 4) and the
9468 old_stack_pointer was double word aligned. Thus the offset to
9469 the iWMMXt registers to be loaded must also be non-double-word
9470 sized, so that the resultant address *is* double-word aligned.
9471 We can ignore floats_offset since that was already included in
9472 the live_regs_mask. */
9473 lrm_count += (lrm_count % 2 ? 2 : 1);
9475 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9476 if (regs_ever_live[reg] && !call_used_regs[reg])
9478 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9479 reg, FP_REGNUM, lrm_count * 4);
9480 lrm_count += 2;
9484 /* saved_regs_mask should contain the IP, which at the time of stack
9485 frame generation actually contains the old stack pointer. So a
9486 quick way to unwind the stack is just pop the IP register directly
9487 into the stack pointer. */
9488 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9489 abort ();
9490 saved_regs_mask &= ~ (1 << IP_REGNUM);
9491 saved_regs_mask |= (1 << SP_REGNUM);
9493 /* There are two registers left in saved_regs_mask - LR and PC. We
9494 only need to restore the LR register (the return address), but to
9495 save time we can load it directly into the PC, unless we need a
9496 special function exit sequence, or we are not really returning. */
9497 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
9498 /* Delete the LR from the register mask, so that the LR on
9499 the stack is loaded into the PC in the register mask. */
9500 saved_regs_mask &= ~ (1 << LR_REGNUM);
9501 else
9502 saved_regs_mask &= ~ (1 << PC_REGNUM);
9504 /* We must use SP as the base register, because SP is one of the
9505 registers being restored. If an interrupt or page fault
9506 happens in the ldm instruction, the SP might or might not
9507 have been restored. That would be bad, as then SP will no
9508 longer indicate the safe area of stack, and we can get stack
9509 corruption. Using SP as the base register means that it will
9510 be reset correctly to the original value, should an interrupt
9511 occur. If the stack pointer already points at the right
9512 place, then omit the subtraction. */
9513 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9514 || current_function_calls_alloca)
9515 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9516 4 * bit_count (saved_regs_mask));
9517 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9519 if (IS_INTERRUPT (func_type))
9520 /* Interrupt handlers will have pushed the
9521 IP onto the stack, so restore it now. */
9522 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9524 else
9526 /* Restore stack pointer if necessary. */
9527 if (offsets->outgoing_args != offsets->saved_regs)
9529 operands[0] = operands[1] = stack_pointer_rtx;
9530 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9531 output_add_immediate (operands);
9534 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9536 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9537 if (regs_ever_live[reg] && !call_used_regs[reg])
9538 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9539 reg, SP_REGNUM);
9541 else
9543 start_reg = FIRST_FPA_REGNUM;
9545 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9547 if (regs_ever_live[reg] && !call_used_regs[reg])
9549 if (reg - start_reg == 3)
9551 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9552 start_reg, SP_REGNUM);
9553 start_reg = reg + 1;
9556 else
9558 if (reg != start_reg)
9559 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9560 start_reg, reg - start_reg,
9561 SP_REGNUM);
9563 start_reg = reg + 1;
9567 /* Just in case the last register checked also needs unstacking. */
9568 if (reg != start_reg)
9569 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9570 start_reg, reg - start_reg, SP_REGNUM);
9573 if (TARGET_HARD_FLOAT && TARGET_VFP)
9575 start_reg = FIRST_VFP_REGNUM;
9576 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9578 if ((!regs_ever_live[reg] || call_used_regs[reg])
9579 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9581 if (start_reg != reg)
9582 arm_output_fldmx (f, SP_REGNUM,
9583 (start_reg - FIRST_VFP_REGNUM) / 2,
9584 (reg - start_reg) / 2);
9585 start_reg = reg + 2;
9588 if (start_reg != reg)
9589 arm_output_fldmx (f, SP_REGNUM,
9590 (start_reg - FIRST_VFP_REGNUM) / 2,
9591 (reg - start_reg) / 2);
9593 if (TARGET_IWMMXT)
9594 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9595 if (regs_ever_live[reg] && !call_used_regs[reg])
9596 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
9598 /* If we can, restore the LR into the PC. */
9599 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9600 && really_return
9601 && current_function_pretend_args_size == 0
9602 && saved_regs_mask & (1 << LR_REGNUM))
9604 saved_regs_mask &= ~ (1 << LR_REGNUM);
9605 saved_regs_mask |= (1 << PC_REGNUM);
9608 /* Load the registers off the stack. If we only have one register
9609 to load, use the LDR instruction - it is faster. */
9610 if (saved_regs_mask == (1 << LR_REGNUM))
9612 /* The exception handler ignores the LR, so we do
9613 not really need to load it off the stack. */
9614 if (eh_ofs)
9615 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
9616 else
9617 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9619 else if (saved_regs_mask)
9621 if (saved_regs_mask & (1 << SP_REGNUM))
9622 /* Note - write back to the stack register is not enabled
9623 (ie "ldmfd sp!..."). We know that the stack pointer is
9624 in the list of registers and if we add writeback the
9625 instruction becomes UNPREDICTABLE. */
9626 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9627 else
9628 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9631 if (current_function_pretend_args_size)
9633 /* Unwind the pre-pushed regs. */
9634 operands[0] = operands[1] = stack_pointer_rtx;
9635 operands[2] = GEN_INT (current_function_pretend_args_size);
9636 output_add_immediate (operands);
9640 if (! really_return
9641 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9642 && current_function_pretend_args_size == 0
9643 && saved_regs_mask & (1 << PC_REGNUM)))
9644 return "";
9646 /* Generate the return instruction. */
9647 switch ((int) ARM_FUNC_TYPE (func_type))
9649 case ARM_FT_EXCEPTION_HANDLER:
9650 /* Even in 26-bit mode we do a mov (rather than a movs)
9651 because we don't have the PSR bits set in the address. */
9652 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
9653 break;
9655 case ARM_FT_ISR:
9656 case ARM_FT_FIQ:
9657 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9658 break;
9660 case ARM_FT_EXCEPTION:
9661 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9662 break;
9664 case ARM_FT_INTERWORKED:
9665 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9666 break;
9668 default:
9669 if (frame_pointer_needed)
9670 /* If we used the frame pointer then the return address
9671 will have been loaded off the stack directly into the
9672 PC, so there is no need to issue a MOV instruction
9673 here. */
9675 else if (current_function_pretend_args_size == 0
9676 && (saved_regs_mask & (1 << LR_REGNUM)))
9677 /* Similarly we may have been able to load LR into the PC
9678 even if we did not create a stack frame. */
9680 else if (TARGET_APCS_32)
9681 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9682 else
9683 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9684 break;
9687 return "";
9690 static void
9691 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9692 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9694 arm_stack_offsets *offsets;
9696 if (TARGET_THUMB)
9698 /* ??? Probably not safe to set this here, since it assumes that a
9699 function will be emitted as assembly immediately after we generate
9700 RTL for it. This does not happen for inline functions. */
9701 return_used_this_function = 0;
9703 else
9705 /* We need to take into account any stack-frame rounding. */
9706 offsets = arm_get_frame_offsets ();
9708 if (use_return_insn (FALSE, NULL)
9709 && return_used_this_function
9710 && offsets->saved_regs != offsets->outgoing_args
9711 && !frame_pointer_needed)
9712 abort ();
9714 /* Reset the ARM-specific per-function variables. */
9715 after_arm_reorg = 0;
9719 /* Generate and emit an insn that we will recognize as a push_multi.
9720 Unfortunately, since this insn does not reflect very well the actual
9721 semantics of the operation, we need to annotate the insn for the benefit
9722 of DWARF2 frame unwind information. */
9723 static rtx
9724 emit_multi_reg_push (int mask)
9726 int num_regs = 0;
9727 int num_dwarf_regs;
9728 int i, j;
9729 rtx par;
9730 rtx dwarf;
9731 int dwarf_par_index;
9732 rtx tmp, reg;
9734 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9735 if (mask & (1 << i))
9736 num_regs++;
9738 if (num_regs == 0 || num_regs > 16)
9739 abort ();
9741 /* We don't record the PC in the dwarf frame information. */
9742 num_dwarf_regs = num_regs;
9743 if (mask & (1 << PC_REGNUM))
9744 num_dwarf_regs--;
9746 /* For the body of the insn we are going to generate an UNSPEC in
9747 parallel with several USEs. This allows the insn to be recognized
9748 by the push_multi pattern in the arm.md file. The insn looks
9749 something like this:
9751 (parallel [
9752 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9753 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9754 (use (reg:SI 11 fp))
9755 (use (reg:SI 12 ip))
9756 (use (reg:SI 14 lr))
9757 (use (reg:SI 15 pc))
9758 ])
9760 For the frame note however, we try to be more explicit and actually
9761 show each register being stored into the stack frame, plus a (single)
9762 decrement of the stack pointer. We do it this way in order to be
9763 friendly to the stack unwinding code, which only wants to see a single
9764 stack decrement per instruction. The RTL we generate for the note looks
9765 something like this:
9767 (sequence [
9768 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9769 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9770 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9771 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9772 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9773 ])
9775 This sequence is used both by the code to support stack unwinding for
9776 exceptions handlers and the code to generate dwarf2 frame debugging. */
9778 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9779 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9780 dwarf_par_index = 1;
9782 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9784 if (mask & (1 << i))
9786 reg = gen_rtx_REG (SImode, i);
9788 XVECEXP (par, 0, 0)
9789 = gen_rtx_SET (VOIDmode,
9790 gen_rtx_MEM (BLKmode,
9791 gen_rtx_PRE_DEC (BLKmode,
9792 stack_pointer_rtx)),
9793 gen_rtx_UNSPEC (BLKmode,
9794 gen_rtvec (1, reg),
9795 UNSPEC_PUSH_MULT));
9797 if (i != PC_REGNUM)
9799 tmp = gen_rtx_SET (VOIDmode,
9800 gen_rtx_MEM (SImode, stack_pointer_rtx),
9801 reg);
9802 RTX_FRAME_RELATED_P (tmp) = 1;
9803 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9804 dwarf_par_index++;
9807 break;
9811 for (j = 1, i++; j < num_regs; i++)
9813 if (mask & (1 << i))
9815 reg = gen_rtx_REG (SImode, i);
9817 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9819 if (i != PC_REGNUM)
9821 tmp = gen_rtx_SET (VOIDmode,
9822 gen_rtx_MEM (SImode,
9823 plus_constant (stack_pointer_rtx,
9824 4 * j)),
9825 reg);
9826 RTX_FRAME_RELATED_P (tmp) = 1;
9827 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9830 j++;
9834 par = emit_insn (par);
9836 tmp = gen_rtx_SET (SImode,
9837 stack_pointer_rtx,
9838 gen_rtx_PLUS (SImode,
9839 stack_pointer_rtx,
9840 GEN_INT (-4 * num_regs)));
9841 RTX_FRAME_RELATED_P (tmp) = 1;
9842 XVECEXP (dwarf, 0, 0) = tmp;
9844 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9845 REG_NOTES (par));
9846 return par;
9849 static rtx
9850 emit_sfm (int base_reg, int count)
9852 rtx par;
9853 rtx dwarf;
9854 rtx tmp, reg;
9855 int i;
9857 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9858 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9860 reg = gen_rtx_REG (XFmode, base_reg++);
9862 XVECEXP (par, 0, 0)
9863 = gen_rtx_SET (VOIDmode,
9864 gen_rtx_MEM (BLKmode,
9865 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9866 gen_rtx_UNSPEC (BLKmode,
9867 gen_rtvec (1, reg),
9868 UNSPEC_PUSH_MULT));
9869 tmp
9870 = gen_rtx_SET (VOIDmode,
9871 gen_rtx_MEM (XFmode,
9872 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9873 reg);
9874 RTX_FRAME_RELATED_P (tmp) = 1;
9875 XVECEXP (dwarf, 0, count - 1) = tmp;
9877 for (i = 1; i < count; i++)
9879 reg = gen_rtx_REG (XFmode, base_reg++);
9880 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9882 tmp = gen_rtx_SET (VOIDmode,
9883 gen_rtx_MEM (XFmode,
9884 gen_rtx_PRE_DEC (BLKmode,
9885 stack_pointer_rtx)),
9886 reg);
9887 RTX_FRAME_RELATED_P (tmp) = 1;
9888 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9891 par = emit_insn (par);
9892 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9893 REG_NOTES (par));
9894 return par;
9898 /* Compute the distance from register FROM to register TO.
9899 These can be the arg pointer (26), the soft frame pointer (25),
9900 the stack pointer (13) or the hard frame pointer (11).
9901 Typical stack layout looks like this:
9903 old stack pointer -> | |
9904 ----
9905 | | \
9906 | | saved arguments for
9907 | | vararg functions
9908 | | /
9910 hard FP & arg pointer -> | | \
9911 | | stack
9912 | | frame
9913 | | /
9915 | | \
9916 | | call saved
9917 | | registers
9918 soft frame pointer -> | | /
9920 | | \
9921 | | local
9922 | | variables
9923 | | /
9925 | | \
9926 | | outgoing
9927 | | arguments
9928 current stack pointer -> | | /
9931 For a given function some or all of these stack components
9932 may not be needed, giving rise to the possibility of
9933 eliminating some of the registers.
9935 The values returned by this function must reflect the behavior
9936 of arm_expand_prologue() and arm_compute_save_reg_mask().
9938 The sign of the number returned reflects the direction of stack
9939 growth, so the values are positive for all eliminations except
9940 from the soft frame pointer to the hard frame pointer.
9942 SFP may point just inside the local variables block to ensure correct
9943 alignment. */
9946 /* Calculate stack offsets. These are used to calculate register elimination
9947 offsets and in prologue/epilogue code. */
9949 static arm_stack_offsets *
9950 arm_get_frame_offsets (void)
9952 struct arm_stack_offsets *offsets;
9953 unsigned long func_type;
9954 int leaf;
9955 int saved;
9956 HOST_WIDE_INT frame_size;
9958 offsets = &cfun->machine->stack_offsets;
9960 /* We need to know if we are a leaf function. Unfortunately, it
9961 is possible to be called after start_sequence has been called,
9962 which causes get_insns to return the insns for the sequence,
9963 not the function, which will cause leaf_function_p to return
9964 the incorrect result.
9965 However, we only need
9966 to know about leaf functions once reload has completed, and the
9967 frame size cannot be changed after that time, so we can safely
9968 use the cached value. */
9970 if (reload_completed)
9971 return offsets;
9973 /* Initially this is the size of the local variables. It will be translated
9974 into an offset once we have determined the size of preceding data. */
9975 frame_size = ROUND_UP_WORD (get_frame_size ());
9977 leaf = leaf_function_p ();
9979 /* Space for variadic functions. */
9980 offsets->saved_args = current_function_pretend_args_size;
9982 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
9984 if (TARGET_ARM)
9986 unsigned int regno;
9988 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
9990 /* We know that SP will be doubleword aligned on entry, and we must
9991 preserve that condition at any subroutine call. We also require the
9992 soft frame pointer to be doubleword aligned. */
9994 if (TARGET_REALLY_IWMMXT)
9996 /* Check for the call-saved iWMMXt registers. */
9997 for (regno = FIRST_IWMMXT_REGNUM;
9998 regno <= LAST_IWMMXT_REGNUM;
9999 regno++)
10000 if (regs_ever_live [regno] && ! call_used_regs [regno])
10001 saved += 8;
10004 func_type = arm_current_func_type ();
10005 if (! IS_VOLATILE (func_type))
10007 /* Space for saved FPA registers. */
10008 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10009 if (regs_ever_live[regno] && ! call_used_regs[regno])
10010 saved += 12;
10012 /* Space for saved VFP registers. */
10013 if (TARGET_HARD_FLOAT && TARGET_VFP)
10014 saved += arm_get_vfp_saved_size ();
10017 else /* TARGET_THUMB */
10019 int reg;
10020 int count_regs;
10022 saved = 0;
10023 count_regs = 0;
10024 for (reg = 8; reg < 13; reg ++)
10025 if (THUMB_REG_PUSHED_P (reg))
10026 count_regs ++;
10027 if (count_regs)
10028 saved += 4 * count_regs;
10029 count_regs = 0;
10030 for (reg = 0; reg <= LAST_LO_REGNUM; reg ++)
10031 if (THUMB_REG_PUSHED_P (reg))
10032 count_regs ++;
10033 if (count_regs || ! leaf_function_p ()
10034 || thumb_far_jump_used_p ())
10035 saved += 4 * (count_regs + 1);
10036 if (TARGET_BACKTRACE)
10038 if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0))
10039 saved += 20;
10040 else
10041 saved += 16;
10045 /* Saved registers include the stack frame. */
10046 offsets->saved_regs = offsets->saved_args + saved;
10047 offsets->soft_frame = offsets->saved_regs;
10048 /* A leaf function does not need any stack alignment if it has nothing
10049 on the stack. */
10050 if (leaf && frame_size == 0)
10052 offsets->outgoing_args = offsets->soft_frame;
10053 return offsets;
10056 /* Ensure SFP has the correct alignment. */
10057 if (ARM_DOUBLEWORD_ALIGN
10058 && (offsets->soft_frame & 7))
10059 offsets->soft_frame += 4;
10061 offsets->outgoing_args = offsets->soft_frame + frame_size
10062 + current_function_outgoing_args_size;
10064 if (ARM_DOUBLEWORD_ALIGN)
10066 /* Ensure SP remains doubleword aligned. */
10067 if (offsets->outgoing_args & 7)
10068 offsets->outgoing_args += 4;
10069 if (offsets->outgoing_args & 7)
10070 abort ();
10073 return offsets;
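/* Worked example (illustrative, ARM mode, non-variadic): with a frame
   pointer, 8 bytes of locals, {fp, ip, lr, pc} saved and no outgoing
   arguments the offsets come out as

        saved_args    = 0
        frame         = 4
        saved_regs    = 16
        soft_frame    = 16   (already doubleword aligned)
        outgoing_args = 24

   which satisfies the doubleword-alignment check above.  */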
10077 /* Calculate the relative offsets for the different stack pointers. Positive
10078 offsets are in the direction of stack growth. */
10080 unsigned int
10081 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10083 arm_stack_offsets *offsets;
10085 offsets = arm_get_frame_offsets ();
10087 /* OK, now we have enough information to compute the distances.
10088 There must be an entry in these switch tables for each pair
10089 of registers in ELIMINABLE_REGS, even if some of the entries
10090 seem to be redundant or useless. */
10091 switch (from)
10093 case ARG_POINTER_REGNUM:
10094 switch (to)
10096 case THUMB_HARD_FRAME_POINTER_REGNUM:
10097 return 0;
10099 case FRAME_POINTER_REGNUM:
10100 /* This is the reverse of the soft frame pointer
10101 to hard frame pointer elimination below. */
10102 return offsets->soft_frame - offsets->saved_args;
10104 case ARM_HARD_FRAME_POINTER_REGNUM:
10105 /* If there is no stack frame then the hard
10106 frame pointer and the arg pointer coincide. */
10107 if (offsets->frame == offsets->saved_regs)
10108 return 0;
10109 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10110 return (frame_pointer_needed
10111 && current_function_needs_context
10112 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10114 case STACK_POINTER_REGNUM:
10115 /* If nothing has been pushed on the stack at all
10116 then this will return -4. This *is* correct! */
10117 return offsets->outgoing_args - (offsets->saved_args + 4);
10119 default:
10120 abort ();
10122 break;
10124 case FRAME_POINTER_REGNUM:
10125 switch (to)
10127 case THUMB_HARD_FRAME_POINTER_REGNUM:
10128 return 0;
10130 case ARM_HARD_FRAME_POINTER_REGNUM:
10131 /* The hard frame pointer points to the top entry in the
10132 stack frame. The soft frame pointer to the bottom entry
10133 in the stack frame. If there is no stack frame at all,
10134 then they are identical. */
10136 return offsets->frame - offsets->soft_frame;
10138 case STACK_POINTER_REGNUM:
10139 return offsets->outgoing_args - offsets->soft_frame;
10141 default:
10142 abort ();
10144 break;
10146 default:
10147 /* You cannot eliminate from the stack pointer.
10148 In theory you could eliminate from the hard frame
10149 pointer to the stack pointer, but this will never
10150 happen, since if a stack frame is not needed the
10151 hard frame pointer will never be used. */
10152 abort ();
10157 /* Generate the prologue instructions for entry into an ARM function. */
10158 void
10159 arm_expand_prologue (void)
10161 int reg;
10162 rtx amount;
10163 rtx insn;
10164 rtx ip_rtx;
10165 unsigned long live_regs_mask;
10166 unsigned long func_type;
10167 int fp_offset = 0;
10168 int saved_pretend_args = 0;
10169 int saved_regs = 0;
10170 unsigned int args_to_push;
10171 arm_stack_offsets *offsets;
10173 func_type = arm_current_func_type ();
10175 /* Naked functions don't have prologues. */
10176 if (IS_NAKED (func_type))
10177 return;
10179 /* Make a copy of current_function_pretend_args_size as we may need to modify it locally. */
10180 args_to_push = current_function_pretend_args_size;
10182 /* Compute which registers we will have to save onto the stack. */
10183 live_regs_mask = arm_compute_save_reg_mask ();
10185 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10187 if (frame_pointer_needed)
10189 if (IS_INTERRUPT (func_type))
10191 /* Interrupt functions must not corrupt any registers.
10192 Creating a frame pointer however, corrupts the IP
10193 register, so we must push it first. */
10194 insn = emit_multi_reg_push (1 << IP_REGNUM);
10196 /* Do not set RTX_FRAME_RELATED_P on this insn.
10197 The dwarf stack unwinding code only wants to see one
10198 stack decrement per function, and this is not it. If
10199 this instruction is labeled as being part of the frame
10200 creation sequence then dwarf2out_frame_debug_expr will
10201 abort when it encounters the assignment of IP to FP
10202 later on, since the use of SP here establishes SP as
10203 the CFA register and not IP.
10205 Anyway this instruction is not really part of the stack
10206 frame creation although it is part of the prologue. */
10208 else if (IS_NESTED (func_type))
10210 /* The static chain register is the same as the IP register
10211 used as a scratch register during stack frame creation.
10212 To get around this we need to find somewhere to store IP
10213 whilst the frame is being created. We try the following
10214 places in order:
10216 1. The last argument register.
10217 2. A slot on the stack above the frame. (This only
10218 works if the function is not a varargs function).
10219 3. Register r3, after pushing the argument registers
10220 onto the stack.
10222 Note - we only need to tell the dwarf2 backend about the SP
10223 adjustment in the second variant; the static chain register
10224 doesn't need to be unwound, as it doesn't contain a value
10225 inherited from the caller. */
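 /* A minimal sketch of what case 1 above amounts to, assuming r3 is
 free (the register choices are illustrative only):

 mov r3, ip @ preserve IP in the last argument register
 mov ip, sp @ IP now scratches the incoming stack pointer
 ... @ push registers, create the stack frame
 mov ip, r3 @ recover the static chain register

 Case 2 instead spills IP to a slot just above the frame, and case 3
 first pushes the argument registers so that r3 becomes free. */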
10227 if (regs_ever_live[3] == 0)
10229 insn = gen_rtx_REG (SImode, 3);
10230 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10231 insn = emit_insn (insn);
10233 else if (args_to_push == 0)
10235 rtx dwarf;
10236 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10237 insn = gen_rtx_MEM (SImode, insn);
10238 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10239 insn = emit_insn (insn);
10241 fp_offset = 4;
10243 /* Just tell the dwarf backend that we adjusted SP. */
10244 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10245 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10246 GEN_INT (-fp_offset)));
10247 RTX_FRAME_RELATED_P (insn) = 1;
10248 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10249 dwarf, REG_NOTES (insn));
10251 else
10253 /* Store the args on the stack. */
10254 if (cfun->machine->uses_anonymous_args)
10255 insn = emit_multi_reg_push
10256 ((0xf0 >> (args_to_push / 4)) & 0xf);
10257 else
10258 insn = emit_insn
10259 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10260 GEN_INT (- args_to_push)));
10262 RTX_FRAME_RELATED_P (insn) = 1;
10264 saved_pretend_args = 1;
10265 fp_offset = args_to_push;
10266 args_to_push = 0;
10268 /* Now reuse r3 to preserve IP. */
10269 insn = gen_rtx_REG (SImode, 3);
10270 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10271 (void) emit_insn (insn);
10275 if (fp_offset)
10277 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10278 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10280 else
10281 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10283 insn = emit_insn (insn);
10284 RTX_FRAME_RELATED_P (insn) = 1;
10287 if (args_to_push)
10289 /* Push the argument registers, or reserve space for them. */
10290 if (cfun->machine->uses_anonymous_args)
10291 insn = emit_multi_reg_push
10292 ((0xf0 >> (args_to_push / 4)) & 0xf);
10293 else
10294 insn = emit_insn
10295 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10296 GEN_INT (- args_to_push)));
10297 RTX_FRAME_RELATED_P (insn) = 1;
10300 /* If this is an interrupt service routine, and the link register
10301 is going to be pushed, and we are not creating a stack frame,
10302 (which would involve an extra push of IP and a pop in the epilogue)
10303 subtracting four from LR now will mean that the function return
10304 can be done with a single instruction. */
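 /* Illustratively: without this adjustment an ISR that pushed LR
 would have to pop it and then return with "subs pc, lr, #4";
 with LR biased by -4 up front, the return collapses into a
 single "ldmfd sp!, {..., pc}^". */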
10305 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10306 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10307 && ! frame_pointer_needed)
10308 emit_insn (gen_rtx_SET (SImode,
10309 gen_rtx_REG (SImode, LR_REGNUM),
10310 gen_rtx_PLUS (SImode,
10311 gen_rtx_REG (SImode, LR_REGNUM),
10312 GEN_INT (-4))));
10314 if (live_regs_mask)
10316 insn = emit_multi_reg_push (live_regs_mask);
10317 saved_regs += bit_count (live_regs_mask) * 4;
10318 RTX_FRAME_RELATED_P (insn) = 1;
10321 if (TARGET_IWMMXT)
10322 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10323 if (regs_ever_live[reg] && ! call_used_regs [reg])
10325 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10326 insn = gen_rtx_MEM (V2SImode, insn);
10327 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10328 gen_rtx_REG (V2SImode, reg)));
10329 RTX_FRAME_RELATED_P (insn) = 1;
10330 saved_regs += 8;
10333 if (! IS_VOLATILE (func_type))
10335 int start_reg;
10337 /* Save any floating point call-saved registers used by this
10338 function. */
10339 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10341 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10342 if (regs_ever_live[reg] && !call_used_regs[reg])
10344 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10345 insn = gen_rtx_MEM (XFmode, insn);
10346 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10347 gen_rtx_REG (XFmode, reg)));
10348 RTX_FRAME_RELATED_P (insn) = 1;
10349 saved_regs += 12;
10352 else
10354 start_reg = LAST_FPA_REGNUM;
10356 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10358 if (regs_ever_live[reg] && !call_used_regs[reg])
10360 if (start_reg - reg == 3)
10362 insn = emit_sfm (reg, 4);
10363 RTX_FRAME_RELATED_P (insn) = 1;
10364 start_reg = reg - 1;
10367 else
10369 if (start_reg != reg)
10371 insn = emit_sfm (reg + 1, start_reg - reg);
10372 RTX_FRAME_RELATED_P (insn) = 1;
10373 saved_regs += (reg - start_reg) * 12;
10375 start_reg = reg - 1;
10379 if (start_reg != reg)
10381 insn = emit_sfm (reg + 1, start_reg - reg);
10382 saved_regs += (reg - start_reg) * 12;
10383 RTX_FRAME_RELATED_P (insn) = 1;
10386 if (TARGET_HARD_FLOAT && TARGET_VFP)
10388 start_reg = FIRST_VFP_REGNUM;
10390 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10392 if ((!regs_ever_live[reg] || call_used_regs[reg])
10393 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10395 if (start_reg != reg)
10396 saved_regs += vfp_emit_fstmx (start_reg,
10397 (reg - start_reg) / 2);
10398 start_reg = reg + 2;
10401 if (start_reg != reg)
10402 saved_regs += vfp_emit_fstmx (start_reg,
10403 (reg - start_reg) / 2);
10407 if (frame_pointer_needed)
10409 /* Create the new frame pointer. */
10410 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10411 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10412 RTX_FRAME_RELATED_P (insn) = 1;
10414 if (IS_NESTED (func_type))
10416 /* Recover the static chain register. */
10417 if (regs_ever_live [3] == 0
10418 || saved_pretend_args)
10419 insn = gen_rtx_REG (SImode, 3);
10420 else /* if (current_function_pretend_args_size == 0) */
10422 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10423 GEN_INT (4));
10424 insn = gen_rtx_MEM (SImode, insn);
10427 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10428 /* Add a USE to stop propagate_one_insn() from barfing. */
10429 emit_insn (gen_prologue_use (ip_rtx));
10433 offsets = arm_get_frame_offsets ();
10434 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10436 /* This add can produce multiple insns for a large constant, so we
10437 need to get tricky. */
10438 rtx last = get_last_insn ();
10440 amount = GEN_INT (offsets->saved_args + saved_regs
10441 - offsets->outgoing_args);
10443 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10444 amount));
10447 last = last ? NEXT_INSN (last) : get_insns ();
10448 RTX_FRAME_RELATED_P (last) = 1;
10450 while (last != insn);
10452 /* If the frame pointer is needed, emit a special barrier that
10453 will prevent the scheduler from moving stores to the frame
10454 before the stack adjustment. */
10455 if (frame_pointer_needed)
10456 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10457 hard_frame_pointer_rtx));
10460 /* If we are profiling, make sure no instructions are scheduled before
10461 the call to mcount. Similarly if the user has requested no
10462 scheduling in the prologue. */
10463 if (current_function_profile || TARGET_NO_SCHED_PRO)
10464 emit_insn (gen_blockage ());
10466 /* If the link register is being kept alive, with the return address in it,
10467 then make sure that it does not get reused by the ce2 pass. */
10468 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10470 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10471 cfun->machine->lr_save_eliminated = 1;
10475 /* If CODE is 'd', then X is a condition operand and the instruction
10476 should only be executed if the condition is true.
10477 If CODE is 'D', then X is a condition operand and the instruction
10478 should only be executed if the condition is false: however, if the mode
10479 of the comparison is CCFPEmode, then always execute the instruction -- we
10480 do this because in these circumstances !GE does not necessarily imply LT;
10481 in these cases the instruction pattern will take care to make sure that
10482 an instruction containing %d will follow, thereby undoing the effects of
10483 doing this instruction unconditionally.
10484 If CODE is 'N' then X is a floating point operand that must be negated
10485 before output.
10486 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10487 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
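 A couple of hypothetical examples: with X = (const_int 5), "%B0"
 prints -6 (the sign-extended bitwise inverse of 5); with X a DImode
 register pair starting at r0, "%M0" prints "{r0-r1}". */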
10488 void
10489 arm_print_operand (FILE *stream, rtx x, int code)
10491 switch (code)
10493 case '@':
10494 fputs (ASM_COMMENT_START, stream);
10495 return;
10497 case '_':
10498 fputs (user_label_prefix, stream);
10499 return;
10501 case '|':
10502 fputs (REGISTER_PREFIX, stream);
10503 return;
10505 case '?':
10506 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10508 if (TARGET_THUMB || current_insn_predicate != NULL)
10509 abort ();
10511 fputs (arm_condition_codes[arm_current_cc], stream);
10513 else if (current_insn_predicate)
10515 enum arm_cond_code code;
10517 if (TARGET_THUMB)
10518 abort ();
10520 code = get_arm_condition_code (current_insn_predicate);
10521 fputs (arm_condition_codes[code], stream);
10523 return;
10525 case 'N':
10527 REAL_VALUE_TYPE r;
10528 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10529 r = REAL_VALUE_NEGATE (r);
10530 fprintf (stream, "%s", fp_const_from_val (&r));
10532 return;
10534 case 'B':
10535 if (GET_CODE (x) == CONST_INT)
10537 HOST_WIDE_INT val;
10538 val = ARM_SIGN_EXTEND (~INTVAL (x));
10539 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10541 else
10543 putc ('~', stream);
10544 output_addr_const (stream, x);
10546 return;
10548 case 'i':
10549 fprintf (stream, "%s", arithmetic_instr (x, 1));
10550 return;
10552 /* Truncate Cirrus shift counts. */
10553 case 's':
10554 if (GET_CODE (x) == CONST_INT)
10556 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10557 return;
10559 arm_print_operand (stream, x, 0);
10560 return;
10562 case 'I':
10563 fprintf (stream, "%s", arithmetic_instr (x, 0));
10564 return;
10566 case 'S':
10568 HOST_WIDE_INT val;
10569 const char * shift = shift_op (x, &val);
10571 if (shift)
10573 fprintf (stream, ", %s ", shift_op (x, &val));
10574 if (val == -1)
10575 arm_print_operand (stream, XEXP (x, 1), 0);
10576 else
10577 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10580 return;
10582 /* An explanation of the 'Q', 'R' and 'H' register operands:
10584 In a pair of registers containing a DI or DF value the 'Q'
10585 operand returns the register number of the register containing
10586 the least significant part of the value. The 'R' operand returns
10587 the register number of the register containing the most
10588 significant part of the value.
10590 The 'H' operand returns the higher of the two register numbers.
10591 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10592 same as the 'Q' operand, since the most significant part of the
10593 value is held in the lower number register. The reverse is true
10594 on systems where WORDS_BIG_ENDIAN is false.
10596 The purpose of these operands is to distinguish between cases
10597 where the endian-ness of the values is important (for example
10598 when they are added together), and cases where the endian-ness
10599 is irrelevant, but the order of register operations is important.
10600 For example when loading a value from memory into a register
10601 pair, the endian-ness does not matter. Provided that the value
10602 from the lower memory address is put into the lower numbered
10603 register, and the value from the higher address is put into the
10604 higher numbered register, the load will work regardless of whether
10605 the value being loaded is big-wordian or little-wordian. The
10606 order of the two register loads can matter however, if the address
10607 of the memory location is actually held in one of the registers
10608 being overwritten by the load. */
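 /* A concrete illustration: for a DImode value held in r0/r1 on a
 target where WORDS_BIG_ENDIAN is false, 'Q' prints r0 (least
 significant word), 'R' prints r1 (most significant word) and 'H'
 prints r1 (the higher register number). With WORDS_BIG_ENDIAN
 true, 'Q' and 'H' both print r1. */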
10609 case 'Q':
10610 if (REGNO (x) > LAST_ARM_REGNUM)
10611 abort ();
10612 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10613 return;
10615 case 'R':
10616 if (REGNO (x) > LAST_ARM_REGNUM)
10617 abort ();
10618 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10619 return;
10621 case 'H':
10622 if (REGNO (x) > LAST_ARM_REGNUM)
10623 abort ();
10624 asm_fprintf (stream, "%r", REGNO (x) + 1);
10625 return;
10627 case 'm':
10628 asm_fprintf (stream, "%r",
10629 GET_CODE (XEXP (x, 0)) == REG
10630 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10631 return;
10633 case 'M':
10634 asm_fprintf (stream, "{%r-%r}",
10635 REGNO (x),
10636 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10637 return;
10639 case 'd':
10640 /* CONST_TRUE_RTX means always -- that's the default. */
10641 if (x == const_true_rtx)
10642 return;
10644 fputs (arm_condition_codes[get_arm_condition_code (x)],
10645 stream);
10646 return;
10648 case 'D':
10649 /* CONST_TRUE_RTX means not always -- ie never. We shouldn't ever
10650 want to do that. */
10651 if (x == const_true_rtx)
10652 abort ();
10654 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10655 (get_arm_condition_code (x))],
10656 stream);
10657 return;
10659 /* Cirrus registers can be accessed in a variety of ways:
10660 single floating point (f)
10661 double floating point (d)
10662 32-bit integer (fx)
10663 64-bit integer (dx). */
10664 case 'W': /* Cirrus register in F mode. */
10665 case 'X': /* Cirrus register in D mode. */
10666 case 'Y': /* Cirrus register in FX mode. */
10667 case 'Z': /* Cirrus register in DX mode. */
10668 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10669 abort ();
10671 fprintf (stream, "mv%s%s",
10672 code == 'W' ? "f"
10673 : code == 'X' ? "d"
10674 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10676 return;
10678 /* Print cirrus register in the mode specified by the register's mode. */
10679 case 'V':
10681 int mode = GET_MODE (x);
10683 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10684 abort ();
10686 fprintf (stream, "mv%s%s",
10687 mode == DFmode ? "d"
10688 : mode == SImode ? "fx"
10689 : mode == DImode ? "dx"
10690 : "f", reg_names[REGNO (x)] + 2);
10692 return;
10695 case 'U':
10696 if (GET_CODE (x) != REG
10697 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10698 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10699 /* Bad value for wCG register number. */
10700 abort ();
10701 else
10702 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10703 return;
10705 /* Print an iWMMXt control register name. */
10706 case 'w':
10707 if (GET_CODE (x) != CONST_INT
10708 || INTVAL (x) < 0
10709 || INTVAL (x) >= 16)
10710 /* Bad value for wC register number. */
10711 abort ();
10712 else
10714 static const char * wc_reg_names [16] =
10716 "wCID", "wCon", "wCSSF", "wCASF",
10717 "wC4", "wC5", "wC6", "wC7",
10718 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10719 "wC12", "wC13", "wC14", "wC15"
10722 fprintf (stream, wc_reg_names [INTVAL (x)]);
10724 return;
10726 /* Print a VFP double precision register name. */
10727 case 'P':
10729 int mode = GET_MODE (x);
10730 int num;
10732 if (mode != DImode && mode != DFmode)
10733 abort ();
10735 if (GET_CODE (x) != REG
10736 || !IS_VFP_REGNUM (REGNO (x)))
10737 abort ();
10739 num = REGNO(x) - FIRST_VFP_REGNUM;
10740 if (num & 1)
10741 abort ();
10743 fprintf (stream, "d%d", num >> 1);
10745 return;
10747 default:
10748 if (x == 0)
10749 abort ();
10751 if (GET_CODE (x) == REG)
10752 asm_fprintf (stream, "%r", REGNO (x));
10753 else if (GET_CODE (x) == MEM)
10755 output_memory_reference_mode = GET_MODE (x);
10756 output_address (XEXP (x, 0));
10758 else if (GET_CODE (x) == CONST_DOUBLE)
10759 fprintf (stream, "#%s", fp_immediate_constant (x));
10760 else if (GET_CODE (x) == NEG)
10761 abort (); /* This should never happen now. */
10762 else
10764 fputc ('#', stream);
10765 output_addr_const (stream, x);
10770 #ifndef AOF_ASSEMBLER
10771 /* Target hook for assembling integer objects. The ARM version needs to
10772 handle word-sized values specially. */
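 /* For instance (hypothetical symbol), a word-sized reference to a
 local symbol "foo" in a PIC constant table is emitted as

 .word foo(GOTOFF)

 whereas a non-local symbol would get a "(GOT)" suffix instead. */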
10773 static bool
10774 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10776 if (size == UNITS_PER_WORD && aligned_p)
10778 fputs ("\t.word\t", asm_out_file);
10779 output_addr_const (asm_out_file, x);
10781 /* Mark symbols as position independent. We only do this in the
10782 .text segment, not in the .data segment. */
10783 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10784 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10786 if (GET_CODE (x) == SYMBOL_REF
10787 && (CONSTANT_POOL_ADDRESS_P (x)
10788 || SYMBOL_REF_LOCAL_P (x)))
10789 fputs ("(GOTOFF)", asm_out_file);
10790 else if (GET_CODE (x) == LABEL_REF)
10791 fputs ("(GOTOFF)", asm_out_file);
10792 else
10793 fputs ("(GOT)", asm_out_file);
10795 fputc ('\n', asm_out_file);
10796 return true;
10799 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
10801 int i, units;
10803 if (GET_CODE (x) != CONST_VECTOR)
10804 abort ();
10806 units = CONST_VECTOR_NUNITS (x);
10808 switch (GET_MODE (x))
10810 case V2SImode: size = 4; break;
10811 case V4HImode: size = 2; break;
10812 case V8QImode: size = 1; break;
10813 default:
10814 abort ();
10817 for (i = 0; i < units; i++)
10819 rtx elt;
10821 elt = CONST_VECTOR_ELT (x, i);
10822 assemble_integer
10823 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10826 return true;
10829 return default_assemble_integer (x, size, aligned_p);
10831 #endif
10833 /* A finite state machine takes care of noticing whether or not instructions
10834 can be conditionally executed, and thus decrease execution time and code
10835 size by deleting branch instructions. The fsm is controlled by
10836 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10838 /* The states of the fsm controlling condition codes are:
10839 0: normal, do nothing special
10840 1: make ASM_OUTPUT_OPCODE not output this instruction
10841 2: make ASM_OUTPUT_OPCODE not output this instruction
10842 3: make instructions conditional
10843 4: make instructions conditional
10845 State transitions (state->state by whom under condition):
10846 0 -> 1 final_prescan_insn if the `target' is a label
10847 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10848 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10849 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10850 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10851 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10852 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10853 (the target insn is arm_target_insn).
10855 If the jump clobbers the conditions then we use states 2 and 4.
10857 A similar thing can be done with conditional return insns.
10859 XXX In case the `target' is an unconditional branch, this conditionalising
10860 of the instructions always reduces code size, but not always execution
10861 time. But then, I want to reduce the code size to somewhere near what
10862 /bin/cc produces. */
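 /* An illustrative example of the 0 -> 1 -> 3 path: a sequence such as

 cmp r0, #0
 beq .L1
 mov r1, #1
 .L1:

 has its branch suppressed and the skipped instruction predicated
 on the inverse condition:

 cmp r0, #0
 movne r1, #1 */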
10864 /* Returns the index of the ARM condition code string in
10865 `arm_condition_codes'. COMPARISON should be an rtx like
10866 `(eq (...) (...))'. */
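 /* For example (hypothetical rtx): (eq (reg:CC cc) (const_int 0)) in
 plain CCmode yields ARM_EQ, while in CC_SWPmode -- where the
 comparison operands were swapped -- EQ still yields ARM_EQ but GT
 yields ARM_LT. */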
10867 static enum arm_cond_code
10868 get_arm_condition_code (rtx comparison)
10870 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10871 int code;
10872 enum rtx_code comp_code = GET_CODE (comparison);
10874 if (GET_MODE_CLASS (mode) != MODE_CC)
10875 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10876 XEXP (comparison, 1));
10878 switch (mode)
10880 case CC_DNEmode: code = ARM_NE; goto dominance;
10881 case CC_DEQmode: code = ARM_EQ; goto dominance;
10882 case CC_DGEmode: code = ARM_GE; goto dominance;
10883 case CC_DGTmode: code = ARM_GT; goto dominance;
10884 case CC_DLEmode: code = ARM_LE; goto dominance;
10885 case CC_DLTmode: code = ARM_LT; goto dominance;
10886 case CC_DGEUmode: code = ARM_CS; goto dominance;
10887 case CC_DGTUmode: code = ARM_HI; goto dominance;
10888 case CC_DLEUmode: code = ARM_LS; goto dominance;
10889 case CC_DLTUmode: code = ARM_CC;
10891 dominance:
10892 if (comp_code != EQ && comp_code != NE)
10893 abort ();
10895 if (comp_code == EQ)
10896 return ARM_INVERSE_CONDITION_CODE (code);
10897 return code;
10899 case CC_NOOVmode:
10900 switch (comp_code)
10902 case NE: return ARM_NE;
10903 case EQ: return ARM_EQ;
10904 case GE: return ARM_PL;
10905 case LT: return ARM_MI;
10906 default: abort ();
10909 case CC_Zmode:
10910 switch (comp_code)
10912 case NE: return ARM_NE;
10913 case EQ: return ARM_EQ;
10914 default: abort ();
10917 case CC_Nmode:
10918 switch (comp_code)
10920 case NE: return ARM_MI;
10921 case EQ: return ARM_PL;
10922 default: abort ();
10925 case CCFPEmode:
10926 case CCFPmode:
10927 /* These encodings assume that AC=1 in the FPA system control
10928 byte. This allows us to handle all cases except UNEQ and
10929 LTGT. */
10930 switch (comp_code)
10932 case GE: return ARM_GE;
10933 case GT: return ARM_GT;
10934 case LE: return ARM_LS;
10935 case LT: return ARM_MI;
10936 case NE: return ARM_NE;
10937 case EQ: return ARM_EQ;
10938 case ORDERED: return ARM_VC;
10939 case UNORDERED: return ARM_VS;
10940 case UNLT: return ARM_LT;
10941 case UNLE: return ARM_LE;
10942 case UNGT: return ARM_HI;
10943 case UNGE: return ARM_PL;
10944 /* UNEQ and LTGT do not have a representation. */
10945 case UNEQ: /* Fall through. */
10946 case LTGT: /* Fall through. */
10947 default: abort ();
10950 case CC_SWPmode:
10951 switch (comp_code)
10953 case NE: return ARM_NE;
10954 case EQ: return ARM_EQ;
10955 case GE: return ARM_LE;
10956 case GT: return ARM_LT;
10957 case LE: return ARM_GE;
10958 case LT: return ARM_GT;
10959 case GEU: return ARM_LS;
10960 case GTU: return ARM_CC;
10961 case LEU: return ARM_CS;
10962 case LTU: return ARM_HI;
10963 default: abort ();
10966 case CC_Cmode:
10967 switch (comp_code)
10969 case LTU: return ARM_CS;
10970 case GEU: return ARM_CC;
10971 default: abort ();
10974 case CCmode:
10975 switch (comp_code)
10977 case NE: return ARM_NE;
10978 case EQ: return ARM_EQ;
10979 case GE: return ARM_GE;
10980 case GT: return ARM_GT;
10981 case LE: return ARM_LE;
10982 case LT: return ARM_LT;
10983 case GEU: return ARM_CS;
10984 case GTU: return ARM_HI;
10985 case LEU: return ARM_LS;
10986 case LTU: return ARM_CC;
10987 default: abort ();
10990 default: abort ();
10993 abort ();
10996 void
10997 arm_final_prescan_insn (rtx insn)
10999 /* BODY will hold the body of INSN. */
11000 rtx body = PATTERN (insn);
11002 /* This will be 1 if trying to repeat the trick, and things need to be
11003 reversed if it appears to fail. */
11004 int reverse = 0;
11006 /* JUMP_CLOBBERS being one implies that the condition codes are
11007 clobbered if the branch is taken, even if the rtl suggests otherwise.
11008 It also means that we have to grub around within the jump expression
11009 to find out what the conditions are when the jump isn't taken. */
11010 int jump_clobbers = 0;
11012 /* If we start with a return insn, we only succeed if we find another one. */
11013 int seeking_return = 0;
11015 /* START_INSN will hold the insn from where we start looking. This is the
11016 first insn after the following code_label if REVERSE is true. */
11017 rtx start_insn = insn;
11019 /* If in state 4, check if the target branch is reached, in order to
11020 change back to state 0. */
11021 if (arm_ccfsm_state == 4)
11023 if (insn == arm_target_insn)
11025 arm_target_insn = NULL;
11026 arm_ccfsm_state = 0;
11028 return;
11031 /* If in state 3, it is possible to repeat the trick, if this insn is an
11032 unconditional branch to a label, and immediately following this branch
11033 is the previous target label which is only used once, and the label this
11034 branch jumps to is not too far off. */
11035 if (arm_ccfsm_state == 3)
11037 if (simplejump_p (insn))
11039 start_insn = next_nonnote_insn (start_insn);
11040 if (GET_CODE (start_insn) == BARRIER)
11042 /* XXX Isn't this always a barrier? */
11043 start_insn = next_nonnote_insn (start_insn);
11045 if (GET_CODE (start_insn) == CODE_LABEL
11046 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11047 && LABEL_NUSES (start_insn) == 1)
11048 reverse = TRUE;
11049 else
11050 return;
11052 else if (GET_CODE (body) == RETURN)
11054 start_insn = next_nonnote_insn (start_insn);
11055 if (GET_CODE (start_insn) == BARRIER)
11056 start_insn = next_nonnote_insn (start_insn);
11057 if (GET_CODE (start_insn) == CODE_LABEL
11058 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11059 && LABEL_NUSES (start_insn) == 1)
11061 reverse = TRUE;
11062 seeking_return = 1;
11064 else
11065 return;
11067 else
11068 return;
11071 if (arm_ccfsm_state != 0 && !reverse)
11072 abort ();
11073 if (GET_CODE (insn) != JUMP_INSN)
11074 return;
11076 /* This jump might be paralleled with a clobber of the condition codes;
11077 the jump should always come first. */
11078 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11079 body = XVECEXP (body, 0, 0);
11081 if (reverse
11082 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11083 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11085 int insns_skipped;
11086 int fail = FALSE, succeed = FALSE;
11087 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11088 int then_not_else = TRUE;
11089 rtx this_insn = start_insn, label = 0;
11091 /* If the jump cannot be done with one instruction, we cannot
11092 conditionally execute the instruction in the inverse case. */
11093 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11095 jump_clobbers = 1;
11096 return;
11099 /* Register the insn jumped to. */
11100 if (reverse)
11102 if (!seeking_return)
11103 label = XEXP (SET_SRC (body), 0);
11105 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11106 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11107 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11109 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11110 then_not_else = FALSE;
11112 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11113 seeking_return = 1;
11114 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11116 seeking_return = 1;
11117 then_not_else = FALSE;
11119 else
11120 abort ();
11122 /* See how many insns this branch skips, and what kind of insns. If all
11123 insns are okay, and the label or unconditional branch to the same
11124 label is not too far away, succeed. */
11125 for (insns_skipped = 0;
11126 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11128 rtx scanbody;
11130 this_insn = next_nonnote_insn (this_insn);
11131 if (!this_insn)
11132 break;
11134 switch (GET_CODE (this_insn))
11136 case CODE_LABEL:
11137 /* Succeed if it is the target label, otherwise fail since
11138 control falls in from somewhere else. */
11139 if (this_insn == label)
11141 if (jump_clobbers)
11143 arm_ccfsm_state = 2;
11144 this_insn = next_nonnote_insn (this_insn);
11146 else
11147 arm_ccfsm_state = 1;
11148 succeed = TRUE;
11150 else
11151 fail = TRUE;
11152 break;
11154 case BARRIER:
11155 /* Succeed if the following insn is the target label.
11156 Otherwise fail.
11157 If return insns are used then the last insn in a function
11158 will be a barrier. */
11159 this_insn = next_nonnote_insn (this_insn);
11160 if (this_insn && this_insn == label)
11162 if (jump_clobbers)
11164 arm_ccfsm_state = 2;
11165 this_insn = next_nonnote_insn (this_insn);
11167 else
11168 arm_ccfsm_state = 1;
11169 succeed = TRUE;
11171 else
11172 fail = TRUE;
11173 break;
11175 case CALL_INSN:
11176 /* If using 32-bit addresses the cc is not preserved over
11177 calls. */
11178 if (TARGET_APCS_32)
11180 /* Succeed if the following insn is the target label,
11181 or if the following two insns are a barrier and
11182 the target label. */
11183 this_insn = next_nonnote_insn (this_insn);
11184 if (this_insn && GET_CODE (this_insn) == BARRIER)
11185 this_insn = next_nonnote_insn (this_insn);
11187 if (this_insn && this_insn == label
11188 && insns_skipped < max_insns_skipped)
11190 if (jump_clobbers)
11192 arm_ccfsm_state = 2;
11193 this_insn = next_nonnote_insn (this_insn);
11195 else
11196 arm_ccfsm_state = 1;
11197 succeed = TRUE;
11199 else
11200 fail = TRUE;
11202 break;
11204 case JUMP_INSN:
11205 /* If this is an unconditional branch to the same label, succeed.
11206 If it is to another label, do nothing. If it is conditional,
11207 fail. */
11208 /* XXX Probably, the tests for SET and the PC are
11209 unnecessary. */
11211 scanbody = PATTERN (this_insn);
11212 if (GET_CODE (scanbody) == SET
11213 && GET_CODE (SET_DEST (scanbody)) == PC)
11215 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11216 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11218 arm_ccfsm_state = 2;
11219 succeed = TRUE;
11221 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11222 fail = TRUE;
11224 /* Fail if a conditional return is undesirable (eg on a
11225 StrongARM), but still allow this if optimizing for size. */
11226 else if (GET_CODE (scanbody) == RETURN
11227 && !use_return_insn (TRUE, NULL)
11228 && !optimize_size)
11229 fail = TRUE;
11230 else if (GET_CODE (scanbody) == RETURN
11231 && seeking_return)
11233 arm_ccfsm_state = 2;
11234 succeed = TRUE;
11236 else if (GET_CODE (scanbody) == PARALLEL)
11238 switch (get_attr_conds (this_insn))
11240 case CONDS_NOCOND:
11241 break;
11242 default:
11243 fail = TRUE;
11244 break;
11247 else
11248 fail = TRUE; /* Unrecognized jump (eg epilogue). */
11250 break;
11252 case INSN:
11253 /* Instructions using or affecting the condition codes make it
11254 fail. */
11255 scanbody = PATTERN (this_insn);
11256 if (!(GET_CODE (scanbody) == SET
11257 || GET_CODE (scanbody) == PARALLEL)
11258 || get_attr_conds (this_insn) != CONDS_NOCOND)
11259 fail = TRUE;
11261 /* A conditional cirrus instruction must be followed by
11262 a non Cirrus instruction. However, since we
11263 conditionalize instructions in this function and by
11264 the time we get here we can't add instructions
11265 (nops), because shorten_branches() has already been
11266 called, we will disable conditionalizing Cirrus
11267 instructions to be safe. */
11268 if (GET_CODE (scanbody) != USE
11269 && GET_CODE (scanbody) != CLOBBER
11270 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11271 fail = TRUE;
11272 break;
11274 default:
11275 break;
11278 if (succeed)
11280 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11281 arm_target_label = CODE_LABEL_NUMBER (label);
11282 else if (seeking_return || arm_ccfsm_state == 2)
11284 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11286 this_insn = next_nonnote_insn (this_insn);
11287 if (this_insn && (GET_CODE (this_insn) == BARRIER
11288 || GET_CODE (this_insn) == CODE_LABEL))
11289 abort ();
11291 if (!this_insn)
11293 /* Oh, dear! We ran off the end... give up. */
11294 recog (PATTERN (insn), insn, NULL);
11295 arm_ccfsm_state = 0;
11296 arm_target_insn = NULL;
11297 return;
11299 arm_target_insn = this_insn;
11301 else
11302 abort ();
11303 if (jump_clobbers)
11305 if (reverse)
11306 abort ();
11307 arm_current_cc =
11308 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11309 0), 0), 1));
11310 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11311 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11312 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11313 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11315 else
11317 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11318 what it was. */
11319 if (!reverse)
11320 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11321 0));
11324 if (reverse || then_not_else)
11325 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11328 /* Restore recog_data (getting the attributes of other insns can
11329 destroy this array, but final.c assumes that it remains intact
11330 across this call; since the insn has been recognized already we
11331 call recog direct). */
11332 recog (PATTERN (insn), insn, NULL);
11336 /* Returns true if REGNO is a valid register
11337 for holding a quantity of type MODE. */
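 /* Two consequences of the checks below, by way of example: DFmode is
 rejected for an odd-numbered VFP register (double-precision values
 must start on an even register), while a general register may hold
 a value of any mode. */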
11339 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11341 if (GET_MODE_CLASS (mode) == MODE_CC)
11342 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11344 if (TARGET_THUMB)
11345 /* For the Thumb we only allow values bigger than SImode in
11346 registers 0 - 6, so that there is always a second low
11347 register available to hold the upper part of the value.
11348 We probably ought to ensure that the register is the
11349 start of an even numbered register pair. */
11350 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11352 if (IS_CIRRUS_REGNUM (regno))
11353 /* We have outlawed SI values in Cirrus registers because they
11354 reside in the lower 32 bits, but SF values reside in the
11355 upper 32 bits. This causes gcc all sorts of grief. We can't
11356 even split the registers into pairs because Cirrus SI values
11357 get sign extended to 64 bits -- aldyh. */
11358 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11360 if (IS_VFP_REGNUM (regno))
11362 if (mode == SFmode || mode == SImode)
11363 return TRUE;
11365 /* DFmode values are only valid in even register pairs. */
11366 if (mode == DFmode)
11367 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11368 return FALSE;
11371 if (IS_IWMMXT_GR_REGNUM (regno))
11372 return mode == SImode;
11374 if (IS_IWMMXT_REGNUM (regno))
11375 return VALID_IWMMXT_REG_MODE (mode);
11377 if (regno <= LAST_ARM_REGNUM)
11378 /* We allow any value to be stored in the general registers. */
11379 return 1;
11381 if ( regno == FRAME_POINTER_REGNUM
11382 || regno == ARG_POINTER_REGNUM)
11383 /* We only allow integers in the fake hard registers. */
11384 return GET_MODE_CLASS (mode) == MODE_INT;
11386 /* The only registers left are the FPA registers
11387 which we only allow to hold FP values. */
11388 return GET_MODE_CLASS (mode) == MODE_FLOAT
11389 && regno >= FIRST_FPA_REGNUM
11390 && regno <= LAST_FPA_REGNUM;
11394 arm_regno_class (int regno)
11396 if (TARGET_THUMB)
11398 if (regno == STACK_POINTER_REGNUM)
11399 return STACK_REG;
11400 if (regno == CC_REGNUM)
11401 return CC_REG;
11402 if (regno < 8)
11403 return LO_REGS;
11404 return HI_REGS;
11407 if ( regno <= LAST_ARM_REGNUM
11408 || regno == FRAME_POINTER_REGNUM
11409 || regno == ARG_POINTER_REGNUM)
11410 return GENERAL_REGS;
11412 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11413 return NO_REGS;
11415 if (IS_CIRRUS_REGNUM (regno))
11416 return CIRRUS_REGS;
11418 if (IS_VFP_REGNUM (regno))
11419 return VFP_REGS;
11421 if (IS_IWMMXT_REGNUM (regno))
11422 return IWMMXT_REGS;
11424 if (IS_IWMMXT_GR_REGNUM (regno))
11425 return IWMMXT_GR_REGS;
11427 return FPA_REGS;
11430 /* Handle a special case when computing the offset
11431 of an argument from the frame pointer. */
11433 arm_debugger_arg_offset (int value, rtx addr)
11435 rtx insn;
11437 /* We are only interested if dbxout_parms() failed to compute the offset. */
11438 if (value != 0)
11439 return 0;
11441 /* We can only cope with the case where the address is held in a register. */
11442 if (GET_CODE (addr) != REG)
11443 return 0;
11445 /* If we are using the frame pointer to point at the argument, then
11446 an offset of 0 is correct. */
11447 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11448 return 0;
11450 /* If we are using the stack pointer to point at the
11451 argument, then an offset of 0 is correct. */
11452 if ((TARGET_THUMB || !frame_pointer_needed)
11453 && REGNO (addr) == SP_REGNUM)
11454 return 0;
11456 /* Oh dear. The argument is pointed to by a register rather
11457 than being held in a register, or being stored at a known
11458 offset from the frame pointer. Since GDB only understands
11459 those two kinds of argument we must translate the address
11460 held in the register into an offset from the frame pointer.
11461 We do this by searching through the insns for the function
11462 looking to see where this register gets its value. If the
11463 register is initialized from the frame pointer plus an offset
11464 then we are in luck and we can continue, otherwise we give up.
11466 This code is exercised by producing debugging information
11467 for a function with arguments like this:
11469 double func (double a, double b, int c, double d) {return d;}
11471 Without this code the stab for parameter 'd' will be set to
11472 an offset of 0 from the frame pointer, rather than 8. */
11474 /* The if() statement says:
11476 If the insn is a normal instruction
11477 and if the insn is setting the value in a register
11478 and if the register being set is the register holding the address of the argument
11479 and if the address is computed by an addition
11480 that involves adding to a register
11481 which is the frame pointer
11482 a constant integer
11484 then... */
11486 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11488 if ( GET_CODE (insn) == INSN
11489 && GET_CODE (PATTERN (insn)) == SET
11490 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11491 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11492 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11493 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11494 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11497 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11499 break;
11503 if (value == 0)
11505 debug_rtx (addr);
11506 warning ("unable to compute real location of stacked parameter");
11507 value = 8; /* XXX magic hack */
11510 return value;
11513 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11514 do \
11516 if ((MASK) & insn_flags) \
11517 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
11519 while (0)
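 /* For illustration, the later use

 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
 ARM_BUILTIN_WZERO);

 registers the builtin only when the selected target's insn_flags
 include FL_IWMMXT, and otherwise expands to nothing. */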
11521 struct builtin_description
11523 const unsigned int mask;
11524 const enum insn_code icode;
11525 const char * const name;
11526 const enum arm_builtins code;
11527 const enum rtx_code comparison;
11528 const unsigned int flag;
11531 static const struct builtin_description bdesc_2arg[] =
11533 #define IWMMXT_BUILTIN(code, string, builtin) \
11534 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11535 ARM_BUILTIN_##builtin, 0, 0 },
11537 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11538 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11539 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11540 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11541 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11542 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11543 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11544 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11545 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11546 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11547 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11548 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11549 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11550 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11551 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11552 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11553 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11554 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11555 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11556 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11557 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11558 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11559 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11560 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11561 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11562 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11563 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11564 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11565 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11566 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11567 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11568 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11569 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11570 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11571 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11572 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11573 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11574 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11575 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11576 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11577 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11578 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11579 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11580 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11581 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11582 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11583 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11584 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11585 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11586 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11587 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11588 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11589 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11590 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11591 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11592 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11593 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11594 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11596 #define IWMMXT_BUILTIN2(code, builtin) \
11597 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11599 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11600 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11601 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11602 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11603 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11604 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11605 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11606 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11607 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11608 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11609 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11610 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11611 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11612 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11613 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11614 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11615 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11616 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11617 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11618 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11619 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11620 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11621 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11622 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11623 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11624 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11625 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11626 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11627 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11628 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11629 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11630 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11633 static const struct builtin_description bdesc_1arg[] =
11635 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11636 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11637 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11638 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11639 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11640 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11641 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11642 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11643 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11644 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11645 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11646 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11647 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11648 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11649 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11650 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11651 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11652 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11655 /* Set up all the iWMMXt builtins. This is
11656 not called if TARGET_IWMMXT is zero. */
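 /* From user code these builtins are reached through 64-bit vector
 types. A minimal sketch (the typedef is illustrative; in practice
 an intrinsics header supplies such wrappers):

 typedef short v4hi __attribute__ ((vector_size (8)));

 v4hi add4 (v4hi a, v4hi b)
 {
 return __builtin_arm_waddh (a, b);
 }

 Calling add4 should emit a single waddh instruction when iWMMXT
 is enabled. */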
11658 static void
11659 arm_init_iwmmxt_builtins (void)
11661 const struct builtin_description * d;
11662 size_t i;
11663 tree endlink = void_list_node;
11665 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11666 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11667 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11669 tree int_ftype_int
11670 = build_function_type (integer_type_node,
11671 tree_cons (NULL_TREE, integer_type_node, endlink));
11672 tree v8qi_ftype_v8qi_v8qi_int
11673 = build_function_type (V8QI_type_node,
11674 tree_cons (NULL_TREE, V8QI_type_node,
11675 tree_cons (NULL_TREE, V8QI_type_node,
11676 tree_cons (NULL_TREE,
11677 integer_type_node,
11678 endlink))));
11679 tree v4hi_ftype_v4hi_int
11680 = build_function_type (V4HI_type_node,
11681 tree_cons (NULL_TREE, V4HI_type_node,
11682 tree_cons (NULL_TREE, integer_type_node,
11683 endlink)));
11684 tree v2si_ftype_v2si_int
11685 = build_function_type (V2SI_type_node,
11686 tree_cons (NULL_TREE, V2SI_type_node,
11687 tree_cons (NULL_TREE, integer_type_node,
11688 endlink)));
11689 tree v2si_ftype_di_di
11690 = build_function_type (V2SI_type_node,
11691 tree_cons (NULL_TREE, long_long_integer_type_node,
11692 tree_cons (NULL_TREE, long_long_integer_type_node,
11693 endlink)));
11694 tree di_ftype_di_int
11695 = build_function_type (long_long_integer_type_node,
11696 tree_cons (NULL_TREE, long_long_integer_type_node,
11697 tree_cons (NULL_TREE, integer_type_node,
11698 endlink)));
11699 tree di_ftype_di_int_int
11700 = build_function_type (long_long_integer_type_node,
11701 tree_cons (NULL_TREE, long_long_integer_type_node,
11702 tree_cons (NULL_TREE, integer_type_node,
11703 tree_cons (NULL_TREE,
11704 integer_type_node,
11705 endlink))));
11706 tree int_ftype_v8qi
11707 = build_function_type (integer_type_node,
11708 tree_cons (NULL_TREE, V8QI_type_node,
11709 endlink));
11710 tree int_ftype_v4hi
11711 = build_function_type (integer_type_node,
11712 tree_cons (NULL_TREE, V4HI_type_node,
11713 endlink));
11714 tree int_ftype_v2si
11715 = build_function_type (integer_type_node,
11716 tree_cons (NULL_TREE, V2SI_type_node,
11717 endlink));
11718 tree int_ftype_v8qi_int
11719 = build_function_type (integer_type_node,
11720 tree_cons (NULL_TREE, V8QI_type_node,
11721 tree_cons (NULL_TREE, integer_type_node,
11722 endlink)));
11723 tree int_ftype_v4hi_int
11724 = build_function_type (integer_type_node,
11725 tree_cons (NULL_TREE, V4HI_type_node,
11726 tree_cons (NULL_TREE, integer_type_node,
11727 endlink)));
11728 tree int_ftype_v2si_int
11729 = build_function_type (integer_type_node,
11730 tree_cons (NULL_TREE, V2SI_type_node,
11731 tree_cons (NULL_TREE, integer_type_node,
11732 endlink)));
11733 tree v8qi_ftype_v8qi_int_int
11734 = build_function_type (V8QI_type_node,
11735 tree_cons (NULL_TREE, V8QI_type_node,
11736 tree_cons (NULL_TREE, integer_type_node,
11737 tree_cons (NULL_TREE,
11738 integer_type_node,
11739 endlink))));
11740 tree v4hi_ftype_v4hi_int_int
11741 = build_function_type (V4HI_type_node,
11742 tree_cons (NULL_TREE, V4HI_type_node,
11743 tree_cons (NULL_TREE, integer_type_node,
11744 tree_cons (NULL_TREE,
11745 integer_type_node,
11746 endlink))));
11747 tree v2si_ftype_v2si_int_int
11748 = build_function_type (V2SI_type_node,
11749 tree_cons (NULL_TREE, V2SI_type_node,
11750 tree_cons (NULL_TREE, integer_type_node,
11751 tree_cons (NULL_TREE,
11752 integer_type_node,
11753 endlink))));
11754 /* Miscellaneous. */
11755 tree v8qi_ftype_v4hi_v4hi
11756 = build_function_type (V8QI_type_node,
11757 tree_cons (NULL_TREE, V4HI_type_node,
11758 tree_cons (NULL_TREE, V4HI_type_node,
11759 endlink)));
11760 tree v4hi_ftype_v2si_v2si
11761 = build_function_type (V4HI_type_node,
11762 tree_cons (NULL_TREE, V2SI_type_node,
11763 tree_cons (NULL_TREE, V2SI_type_node,
11764 endlink)));
11765 tree v2si_ftype_v4hi_v4hi
11766 = build_function_type (V2SI_type_node,
11767 tree_cons (NULL_TREE, V4HI_type_node,
11768 tree_cons (NULL_TREE, V4HI_type_node,
11769 endlink)));
11770 tree v2si_ftype_v8qi_v8qi
11771 = build_function_type (V2SI_type_node,
11772 tree_cons (NULL_TREE, V8QI_type_node,
11773 tree_cons (NULL_TREE, V8QI_type_node,
11774 endlink)));
11775 tree v4hi_ftype_v4hi_di
11776 = build_function_type (V4HI_type_node,
11777 tree_cons (NULL_TREE, V4HI_type_node,
11778 tree_cons (NULL_TREE,
11779 long_long_integer_type_node,
11780 endlink)));
11781 tree v2si_ftype_v2si_di
11782 = build_function_type (V2SI_type_node,
11783 tree_cons (NULL_TREE, V2SI_type_node,
11784 tree_cons (NULL_TREE,
11785 long_long_integer_type_node,
11786 endlink)));
11787 tree void_ftype_int_int
11788 = build_function_type (void_type_node,
11789 tree_cons (NULL_TREE, integer_type_node,
11790 tree_cons (NULL_TREE, integer_type_node,
11791 endlink)));
11792 tree di_ftype_void
11793 = build_function_type (long_long_unsigned_type_node, endlink);
11794 tree di_ftype_v8qi
11795 = build_function_type (long_long_integer_type_node,
11796 tree_cons (NULL_TREE, V8QI_type_node,
11797 endlink));
11798 tree di_ftype_v4hi
11799 = build_function_type (long_long_integer_type_node,
11800 tree_cons (NULL_TREE, V4HI_type_node,
11801 endlink));
11802 tree di_ftype_v2si
11803 = build_function_type (long_long_integer_type_node,
11804 tree_cons (NULL_TREE, V2SI_type_node,
11805 endlink));
11806 tree v2si_ftype_v4hi
11807 = build_function_type (V2SI_type_node,
11808 tree_cons (NULL_TREE, V4HI_type_node,
11809 endlink));
11810 tree v4hi_ftype_v8qi
11811 = build_function_type (V4HI_type_node,
11812 tree_cons (NULL_TREE, V8QI_type_node,
11813 endlink));
11815 tree di_ftype_di_v4hi_v4hi
11816 = build_function_type (long_long_unsigned_type_node,
11817 tree_cons (NULL_TREE,
11818 long_long_unsigned_type_node,
11819 tree_cons (NULL_TREE, V4HI_type_node,
11820 tree_cons (NULL_TREE,
11821 V4HI_type_node,
11822 endlink))));
11824 tree di_ftype_v4hi_v4hi
11825 = build_function_type (long_long_unsigned_type_node,
11826 tree_cons (NULL_TREE, V4HI_type_node,
11827 tree_cons (NULL_TREE, V4HI_type_node,
11828 endlink)));
11830 /* Normal vector binops. */
11831 tree v8qi_ftype_v8qi_v8qi
11832 = build_function_type (V8QI_type_node,
11833 tree_cons (NULL_TREE, V8QI_type_node,
11834 tree_cons (NULL_TREE, V8QI_type_node,
11835 endlink)));
11836 tree v4hi_ftype_v4hi_v4hi
11837 = build_function_type (V4HI_type_node,
11838 tree_cons (NULL_TREE, V4HI_type_node,
11839 tree_cons (NULL_TREE, V4HI_type_node,
11840 endlink)));
11841 tree v2si_ftype_v2si_v2si
11842 = build_function_type (V2SI_type_node,
11843 tree_cons (NULL_TREE, V2SI_type_node,
11844 tree_cons (NULL_TREE, V2SI_type_node,
11845 endlink)));
11846 tree di_ftype_di_di
11847 = build_function_type (long_long_unsigned_type_node,
11848 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11849 tree_cons (NULL_TREE,
11850 long_long_unsigned_type_node,
11851 endlink)));
11853 /* Add all builtins that are more or less simple operations on two
11854 operands. */
11855 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11857 /* Use one of the operands; the target can have a different mode for
11858 mask-generating compares. */
11859 enum machine_mode mode;
11860 tree type;
11862 if (d->name == 0)
11863 continue;
11865 mode = insn_data[d->icode].operand[1].mode;
11867 switch (mode)
11869 case V8QImode:
11870 type = v8qi_ftype_v8qi_v8qi;
11871 break;
11872 case V4HImode:
11873 type = v4hi_ftype_v4hi_v4hi;
11874 break;
11875 case V2SImode:
11876 type = v2si_ftype_v2si_v2si;
11877 break;
11878 case DImode:
11879 type = di_ftype_di_di;
11880 break;
11882 default:
11883 abort ();
11886 def_mbuiltin (d->mask, d->name, type, d->code);
11889 /* Add the remaining MMX insns with somewhat more complicated types. */
11890 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11891 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11892 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11894 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11895 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11896 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11897 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11898 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11899 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11901 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11902 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11903 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11904 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11905 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11906 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11908 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11909 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11910 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11911 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11912 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11913 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11915 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11916 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11917 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11918 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11919 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11920 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11922 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11924 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11925 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11926 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11927 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11929 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11930 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11931 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11932 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11933 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11934 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11936 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11937 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11939 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11941 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11943 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11948 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11951 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11958 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11962 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11973 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
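/* For example, the builtins registered above are normally reached
   through mmintrin.h, but they can also be called directly.  A minimal
   sketch, assuming the usual 8-byte iWMMXt vector layout (the typedef
   names here are hypothetical):

     typedef char v8qi __attribute__ ((vector_size (8)));
     typedef int v2si __attribute__ ((vector_size (8)));

     v2si sum_abs_diff (v8qi a, v8qi b)
     {
       return __builtin_arm_wsadb (a, b);
     }

   With iWMMXt enabled this call is expanded by
   arm_expand_binop_builtin below.  */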
11981 static void
11982 arm_init_builtins (void)
11984 if (TARGET_REALLY_IWMMXT)
11985 arm_init_iwmmxt_builtins ();
11988 /* Errors in the source file can cause expand_expr to return const0_rtx
11989 where we expect a vector. To avoid crashing, use one of the vector
11990 clear instructions. */
11992 static rtx
11993 safe_vector_operand (rtx x, enum machine_mode mode)
11995 if (x != const0_rtx)
11996 return x;
11997 x = gen_reg_rtx (mode);
11999 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12000 : gen_rtx_SUBREG (DImode, x, 0)));
12001 return x;
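/* A sketch of the situation this guards against: given erroneous
   source, a call such as __builtin_arm_wsadb (x, y) where one argument
   failed to parse can make expand_expr return const0_rtx in place of a
   V8QImode value.  safe_vector_operand then substitutes a freshly
   cleared register (via the iwmmxt_clrdi pattern), so the expanders
   below are never handed a scalar zero.  */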
12004 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12006 static rtx
12007 arm_expand_binop_builtin (enum insn_code icode,
12008 tree arglist, rtx target)
12010 rtx pat;
12011 tree arg0 = TREE_VALUE (arglist);
12012 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12013 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12014 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12015 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12016 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12017 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12019 if (VECTOR_MODE_P (mode0))
12020 op0 = safe_vector_operand (op0, mode0);
12021 if (VECTOR_MODE_P (mode1))
12022 op1 = safe_vector_operand (op1, mode1);
12024 if (! target
12025 || GET_MODE (target) != tmode
12026 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12027 target = gen_reg_rtx (tmode);
12029 /* In case the insn wants input operands in modes different from
12030 the result, abort. */
12031 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12032 abort ();
12034 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12035 op0 = copy_to_mode_reg (mode0, op0);
12036 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12037 op1 = copy_to_mode_reg (mode1, op1);
12039 pat = GEN_FCN (icode) (target, op0, op1);
12040 if (! pat)
12041 return 0;
12042 emit_insn (pat);
12043 return target;
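/* Worked example (a sketch): for __builtin_arm_wsadb, ICODE is
   CODE_FOR_iwmmxt_wsadb, so tmode is V2SImode and mode0 == mode1 ==
   V8QImode.  The two arguments are copied into V8QImode registers if
   the insn predicates reject them, and the resulting insn sets the
   V2SImode TARGET from the two vector operands.  */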
12046 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12048 static rtx
12049 arm_expand_unop_builtin (enum insn_code icode,
12050 tree arglist, rtx target, int do_load)
12052 rtx pat;
12053 tree arg0 = TREE_VALUE (arglist);
12054 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12055 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12056 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12058 if (! target
12059 || GET_MODE (target) != tmode
12060 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12061 target = gen_reg_rtx (tmode);
12062 if (do_load)
12063 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12064 else
12066 if (VECTOR_MODE_P (mode0))
12067 op0 = safe_vector_operand (op0, mode0);
12069 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12070 op0 = copy_to_mode_reg (mode0, op0);
12073 pat = GEN_FCN (icode) (target, op0);
12074 if (! pat)
12075 return 0;
12076 emit_insn (pat);
12077 return target;
12080 /* Expand an expression EXP that calls a built-in function,
12081 with result going to TARGET if that's convenient
12082 (and in mode MODE if that's convenient).
12083 SUBTARGET may be used as the target for computing one of EXP's operands.
12084 IGNORE is nonzero if the value is to be ignored. */
12086 static rtx
12087 arm_expand_builtin (tree exp,
12088 rtx target,
12089 rtx subtarget ATTRIBUTE_UNUSED,
12090 enum machine_mode mode ATTRIBUTE_UNUSED,
12091 int ignore ATTRIBUTE_UNUSED)
12093 const struct builtin_description * d;
12094 enum insn_code icode;
12095 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12096 tree arglist = TREE_OPERAND (exp, 1);
12097 tree arg0;
12098 tree arg1;
12099 tree arg2;
12100 rtx op0;
12101 rtx op1;
12102 rtx op2;
12103 rtx pat;
12104 int fcode = DECL_FUNCTION_CODE (fndecl);
12105 size_t i;
12106 enum machine_mode tmode;
12107 enum machine_mode mode0;
12108 enum machine_mode mode1;
12109 enum machine_mode mode2;
12111 switch (fcode)
12113 case ARM_BUILTIN_TEXTRMSB:
12114 case ARM_BUILTIN_TEXTRMUB:
12115 case ARM_BUILTIN_TEXTRMSH:
12116 case ARM_BUILTIN_TEXTRMUH:
12117 case ARM_BUILTIN_TEXTRMSW:
12118 case ARM_BUILTIN_TEXTRMUW:
12119 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12120 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12121 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12122 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12123 : CODE_FOR_iwmmxt_textrmw);
12125 arg0 = TREE_VALUE (arglist);
12126 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12127 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12128 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12129 tmode = insn_data[icode].operand[0].mode;
12130 mode0 = insn_data[icode].operand[1].mode;
12131 mode1 = insn_data[icode].operand[2].mode;
12133 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12134 op0 = copy_to_mode_reg (mode0, op0);
12135 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12137 /* @@@ better error message */
12138 error ("selector must be an immediate");
12139 return gen_reg_rtx (tmode);
12141 if (target == 0
12142 || GET_MODE (target) != tmode
12143 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12144 target = gen_reg_rtx (tmode);
12145 pat = GEN_FCN (icode) (target, op0, op1);
12146 if (! pat)
12147 return 0;
12148 emit_insn (pat);
12149 return target;
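/* Usage sketch: __builtin_arm_textrmsb (v, 3) extracts byte 3 of V,
   sign-extended.  The selector must be a compile-time constant;
   passing a variable fails the operand[2] predicate above and
   triggers the "selector must be an immediate" error.  */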
12151 case ARM_BUILTIN_TINSRB:
12152 case ARM_BUILTIN_TINSRH:
12153 case ARM_BUILTIN_TINSRW:
12154 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12155 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12156 : CODE_FOR_iwmmxt_tinsrw);
12157 arg0 = TREE_VALUE (arglist);
12158 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12159 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12160 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12161 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12162 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12163 tmode = insn_data[icode].operand[0].mode;
12164 mode0 = insn_data[icode].operand[1].mode;
12165 mode1 = insn_data[icode].operand[2].mode;
12166 mode2 = insn_data[icode].operand[3].mode;
12168 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12169 op0 = copy_to_mode_reg (mode0, op0);
12170 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12171 op1 = copy_to_mode_reg (mode1, op1);
12172 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12174 /* @@@ better error message */
12175 error ("selector must be an immediate");
12176 return const0_rtx;
12178 if (target == 0
12179 || GET_MODE (target) != tmode
12180 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12181 target = gen_reg_rtx (tmode);
12182 pat = GEN_FCN (icode) (target, op0, op1, op2);
12183 if (! pat)
12184 return 0;
12185 emit_insn (pat);
12186 return target;
12188 case ARM_BUILTIN_SETWCX:
12189 arg0 = TREE_VALUE (arglist);
12190 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12191 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12192 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12193 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12194 return 0;
12196 case ARM_BUILTIN_GETWCX:
12197 arg0 = TREE_VALUE (arglist);
12198 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12199 target = gen_reg_rtx (SImode);
12200 emit_insn (gen_iwmmxt_tmrc (target, op0));
12201 return target;
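/* Usage sketch: these two builtins move values to and from the iWMMXt
   control registers via the TMCR and TMRC instructions, e.g.

     int id = __builtin_arm_getwcx (0);

   which presumably reads wCID (control register 0).  The register
   number is expected to be a small constant accepted by the
   iwmmxt_tmcr/iwmmxt_tmrc patterns.  */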
12203 case ARM_BUILTIN_WSHUFH:
12204 icode = CODE_FOR_iwmmxt_wshufh;
12205 arg0 = TREE_VALUE (arglist);
12206 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12207 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12208 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12209 tmode = insn_data[icode].operand[0].mode;
12210 mode1 = insn_data[icode].operand[1].mode;
12211 mode2 = insn_data[icode].operand[2].mode;
12213 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12214 op0 = copy_to_mode_reg (mode1, op0);
12215 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12217 /* @@@ better error message */
12218 error ("mask must be an immediate");
12219 return const0_rtx;
12221 if (target == 0
12222 || GET_MODE (target) != tmode
12223 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12224 target = gen_reg_rtx (tmode);
12225 pat = GEN_FCN (icode) (target, op0, op1);
12226 if (! pat)
12227 return 0;
12228 emit_insn (pat);
12229 return target;
12231 case ARM_BUILTIN_WSADB:
12232 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12233 case ARM_BUILTIN_WSADH:
12234 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12235 case ARM_BUILTIN_WSADBZ:
12236 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12237 case ARM_BUILTIN_WSADHZ:
12238 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12240 /* Several three-argument builtins. */
12241 case ARM_BUILTIN_WMACS:
12242 case ARM_BUILTIN_WMACU:
12243 case ARM_BUILTIN_WALIGN:
12244 case ARM_BUILTIN_TMIA:
12245 case ARM_BUILTIN_TMIAPH:
12246 case ARM_BUILTIN_TMIATT:
12247 case ARM_BUILTIN_TMIATB:
12248 case ARM_BUILTIN_TMIABT:
12249 case ARM_BUILTIN_TMIABB:
12250 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12251 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12252 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12253 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12254 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12255 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12256 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12257 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12258 : CODE_FOR_iwmmxt_walign);
12259 arg0 = TREE_VALUE (arglist);
12260 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12261 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12262 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12263 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12264 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12265 tmode = insn_data[icode].operand[0].mode;
12266 mode0 = insn_data[icode].operand[1].mode;
12267 mode1 = insn_data[icode].operand[2].mode;
12268 mode2 = insn_data[icode].operand[3].mode;
12270 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12271 op0 = copy_to_mode_reg (mode0, op0);
12272 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12273 op1 = copy_to_mode_reg (mode1, op1);
12274 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12275 op2 = copy_to_mode_reg (mode2, op2);
12276 if (target == 0
12277 || GET_MODE (target) != tmode
12278 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12279 target = gen_reg_rtx (tmode);
12280 pat = GEN_FCN (icode) (target, op0, op1, op2);
12281 if (! pat)
12282 return 0;
12283 emit_insn (pat);
12284 return target;
12286 case ARM_BUILTIN_WZERO:
12287 target = gen_reg_rtx (DImode);
12288 emit_insn (gen_iwmmxt_clrdi (target));
12289 return target;
12291 default:
12292 break;
12295 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12296 if (d->code == (const enum arm_builtins) fcode)
12297 return arm_expand_binop_builtin (d->icode, arglist, target);
12299 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12300 if (d->code == (const enum arm_builtins) fcode)
12301 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12303 /* @@@ Should really do something sensible here. */
12304 return NULL_RTX;
12307 /* Recursively search through all of the blocks in a function
12308 checking to see if any of the variables created in that
12309 function match the RTX called 'orig'. If they do then
12310 replace them with the RTX called 'new'. */
12311 static void
12312 replace_symbols_in_block (tree block, rtx orig, rtx new)
12314 for (; block; block = BLOCK_CHAIN (block))
12316 tree sym;
12318 if (!TREE_USED (block))
12319 continue;
12321 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12323 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12324 || DECL_IGNORED_P (sym)
12325 || TREE_CODE (sym) != VAR_DECL
12326 || DECL_EXTERNAL (sym)
12327 || !rtx_equal_p (DECL_RTL (sym), orig)
12329 continue;
12331 SET_DECL_RTL (sym, new);
12334 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12338 /* Return the number (counting from 0) of
12339 the least significant set bit in MASK. */
12341 inline static int
12342 number_of_first_bit_set (int mask)
12344 int bit;
12346 for (bit = 0;
12347 (mask & (1 << bit)) == 0;
12348 ++bit)
12349 continue;
12351 return bit;
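/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3
   is the lowest bit set in 0x18.  Note that the loop above never
   terminates for a zero MASK, so callers must pass a nonzero mask.  */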
12354 /* Generate code to return from a thumb function.
12355 If 'reg_containing_return_addr' is -1, then the return address is
12356 actually on the stack, at the stack pointer. */
12357 static void
12358 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
12360 unsigned regs_available_for_popping;
12361 unsigned regs_to_pop;
12362 int pops_needed;
12363 unsigned available;
12364 unsigned required;
12365 int mode;
12366 int size;
12367 int restore_a4 = FALSE;
12369 /* Compute the registers we need to pop. */
12370 regs_to_pop = 0;
12371 pops_needed = 0;
12373 /* There is an assumption here that, if eh_ofs is not NULL, the
12374 normal return address will have been pushed. */
12375 if (reg_containing_return_addr == -1 || eh_ofs)
12377 /* When we are generating a return for __builtin_eh_return,
12378 reg_containing_return_addr must specify the return regno. */
12379 if (eh_ofs && reg_containing_return_addr == -1)
12380 abort ();
12382 regs_to_pop |= 1 << LR_REGNUM;
12383 ++pops_needed;
12386 if (TARGET_BACKTRACE)
12388 /* Restore the (ARM) frame pointer and stack pointer. */
12389 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12390 pops_needed += 2;
12393 /* If there is nothing to pop then just emit the BX instruction and
12394 return. */
12395 if (pops_needed == 0)
12397 if (eh_ofs)
12398 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12400 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12401 return;
12403 /* Otherwise, if we are not supporting interworking, have not created
12404 a backtrace structure, and the function was not entered in ARM mode,
12405 then just pop the return address straight into the PC. */
12406 else if (!TARGET_INTERWORK
12407 && !TARGET_BACKTRACE
12408 && !is_called_in_ARM_mode (current_function_decl))
12410 if (eh_ofs)
12412 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
12413 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12414 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12416 else
12417 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12419 return;
12422 /* Find out how many of the (return) argument registers we can corrupt. */
12423 regs_available_for_popping = 0;
12425 /* If returning via __builtin_eh_return, the bottom three registers
12426 all contain information needed for the return. */
12427 if (eh_ofs)
12428 size = 12;
12429 else
12431 #ifdef RTX_CODE
12432 /* Where possible, deduce the registers used from the function's
12433 return value. This is more reliable than examining
12434 regs_ever_live[] because that will be set if the register is
12435 ever used in the function, not just if the register is used
12436 to hold a return value. */
12438 if (current_function_return_rtx != 0)
12439 mode = GET_MODE (current_function_return_rtx);
12440 else
12441 #endif
12442 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12444 size = GET_MODE_SIZE (mode);
12446 if (size == 0)
12448 /* In a void function we can use any argument register.
12449 In a function that returns a structure on the stack
12450 we can use the second and third argument registers. */
12451 if (mode == VOIDmode)
12452 regs_available_for_popping =
12453 (1 << ARG_REGISTER (1))
12454 | (1 << ARG_REGISTER (2))
12455 | (1 << ARG_REGISTER (3));
12456 else
12457 regs_available_for_popping =
12458 (1 << ARG_REGISTER (2))
12459 | (1 << ARG_REGISTER (3));
12461 else if (size <= 4)
12462 regs_available_for_popping =
12463 (1 << ARG_REGISTER (2))
12464 | (1 << ARG_REGISTER (3));
12465 else if (size <= 8)
12466 regs_available_for_popping =
12467 (1 << ARG_REGISTER (3));
12470 /* Match registers to be popped with registers into which we pop them. */
12471 for (available = regs_available_for_popping,
12472 required = regs_to_pop;
12473 required != 0 && available != 0;
12474 available &= ~(available & - available),
12475 required &= ~(required & - required))
12476 -- pops_needed;
12478 /* If we have any popping registers left over, remove them. */
12479 if (available > 0)
12480 regs_available_for_popping &= ~available;
12482 /* Otherwise if we need another popping register we can use
12483 the fourth argument register. */
12484 else if (pops_needed)
12486 /* If we have not found any free argument registers and
12487 reg a4 contains the return address, we must move it. */
12488 if (regs_available_for_popping == 0
12489 && reg_containing_return_addr == LAST_ARG_REGNUM)
12491 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12492 reg_containing_return_addr = LR_REGNUM;
12494 else if (size > 12)
12496 /* Register a4 is being used to hold part of the return value,
12497 but we have dire need of a free, low register. */
12498 restore_a4 = TRUE;
12500 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12503 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12505 /* The fourth argument register is available. */
12506 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12508 --pops_needed;
12512 /* Pop as many registers as we can. */
12513 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12514 regs_available_for_popping);
12516 /* Process the registers we popped. */
12517 if (reg_containing_return_addr == -1)
12519 /* The return address was popped into the lowest numbered register. */
12520 regs_to_pop &= ~(1 << LR_REGNUM);
12522 reg_containing_return_addr =
12523 number_of_first_bit_set (regs_available_for_popping);
12525 /* Remove this register from the mask of available registers, so that
12526 the return address will not be corrupted by further pops. */
12527 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12530 /* If we popped other registers then handle them here. */
12531 if (regs_available_for_popping)
12533 int frame_pointer;
12535 /* Work out which register currently contains the frame pointer. */
12536 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12538 /* Move it into the correct place. */
12539 asm_fprintf (f, "\tmov\t%r, %r\n",
12540 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12542 /* (Temporarily) remove it from the mask of popped registers. */
12543 regs_available_for_popping &= ~(1 << frame_pointer);
12544 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12546 if (regs_available_for_popping)
12548 int stack_pointer;
12550 /* We popped the stack pointer as well;
12551 find the register that contains it. */
12552 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12554 /* Move it into the stack register. */
12555 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12557 /* At this point we have popped all necessary registers, so
12558 do not worry about restoring regs_available_for_popping
12559 to its correct value:
12561 assert (pops_needed == 0)
12562 assert (regs_available_for_popping == (1 << frame_pointer))
12563 assert (regs_to_pop == (1 << STACK_POINTER)) */
12565 else
12567 /* Since we have just moved the popped value into the frame
12568 pointer, the popping register is available for reuse, and
12569 we know that we still have the stack pointer left to pop. */
12570 regs_available_for_popping |= (1 << frame_pointer);
12574 /* If we still have registers left on the stack, but we no longer have
12575 any registers into which we can pop them, then we must move the return
12576 address into the link register and make available the register that
12577 contained it. */
12578 if (regs_available_for_popping == 0 && pops_needed > 0)
12580 regs_available_for_popping |= 1 << reg_containing_return_addr;
12582 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12583 reg_containing_return_addr);
12585 reg_containing_return_addr = LR_REGNUM;
12588 /* If we have registers left on the stack then pop some more.
12589 We know that at most we will want to pop FP and SP. */
12590 if (pops_needed > 0)
12592 int popped_into;
12593 int move_to;
12595 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12596 regs_available_for_popping);
12598 /* We have popped either FP or SP.
12599 Move whichever one it is into the correct register. */
12600 popped_into = number_of_first_bit_set (regs_available_for_popping);
12601 move_to = number_of_first_bit_set (regs_to_pop);
12603 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12605 regs_to_pop &= ~(1 << move_to);
12607 --pops_needed;
12610 /* If we still have not popped everything then we must have only
12611 had one register available to us and we are now popping the SP. */
12612 if (pops_needed > 0)
12614 int popped_into;
12616 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12617 regs_available_for_popping);
12619 popped_into = number_of_first_bit_set (regs_available_for_popping);
12621 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12623 /* assert (regs_to_pop == (1 << STACK_POINTER))
12624 assert (pops_needed == 1) */
12628 /* If necessary restore the a4 register. */
12629 if (restore_a4)
12631 if (reg_containing_return_addr != LR_REGNUM)
12633 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12634 reg_containing_return_addr = LR_REGNUM;
12637 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12640 if (eh_ofs)
12641 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12643 /* Return to caller. */
12644 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12647 /* Emit code to push or pop registers to or from the stack. F is the
12648 assembly file. MASK is the registers to push or pop. PUSH is
12649 nonzero if we should push, and zero if we should pop. For debugging
12650 output, if pushing, adjust CFA_OFFSET by the amount of space added
12651 to the stack. REAL_REGS should have the same number of bits set as
12652 MASK, and will be used instead (in the same order) to describe which
12653 registers were saved - this is used to mark the save slots when we
12654 push high registers after moving them to low registers. */
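/* Output sketch: with MASK == (1 << 4) | (1 << 5) | (1 << LR_REGNUM)
   and PUSH nonzero this emits

     push {r4, r5, lr}

   and, if dwarf2 frame info is wanted, advances *CFA_OFFSET by 12
   and records one save slot per bit set in REAL_REGS.  */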
12655 static void
12656 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12658 int regno;
12659 int lo_mask = mask & 0xFF;
12660 int pushed_words = 0;
12662 if (lo_mask == 0 && !push && (mask & (1 << 15)))
12664 /* Special case. Do not generate a POP PC statement here; do it in
12665 thumb_exit (). */
12666 thumb_exit (f, -1, NULL_RTX);
12667 return;
12670 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12672 /* Look at the low registers first. */
12673 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12675 if (lo_mask & 1)
12677 asm_fprintf (f, "%r", regno);
12679 if ((lo_mask & ~1) != 0)
12680 fprintf (f, ", ");
12682 pushed_words++;
12686 if (push && (mask & (1 << LR_REGNUM)))
12688 /* Catch pushing the LR. */
12689 if (mask & 0xFF)
12690 fprintf (f, ", ");
12692 asm_fprintf (f, "%r", LR_REGNUM);
12694 pushed_words++;
12696 else if (!push && (mask & (1 << PC_REGNUM)))
12698 /* Catch popping the PC. */
12699 if (TARGET_INTERWORK || TARGET_BACKTRACE)
12701 /* The PC is never popped directly; instead
12702 it is popped into r3 and then BX is used. */
12703 fprintf (f, "}\n");
12705 thumb_exit (f, -1, NULL_RTX);
12707 return;
12709 else
12711 if (mask & 0xFF)
12712 fprintf (f, ", ");
12714 asm_fprintf (f, "%r", PC_REGNUM);
12718 fprintf (f, "}\n");
12720 if (push && pushed_words && dwarf2out_do_frame ())
12722 char *l = dwarf2out_cfi_label ();
12723 int pushed_mask = real_regs;
12725 *cfa_offset += pushed_words * 4;
12726 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12728 pushed_words = 0;
12729 pushed_mask = real_regs;
12730 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12732 if (pushed_mask & 1)
12733 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
12738 void
12739 thumb_final_prescan_insn (rtx insn)
12741 if (flag_print_asm_name)
12742 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12743 INSN_ADDRESSES (INSN_UID (insn)));
12746 int
12747 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12749 unsigned HOST_WIDE_INT mask = 0xff;
12750 int i;
12752 if (val == 0) /* XXX */
12753 return 0;
12755 for (i = 0; i < 25; i++)
12756 if ((val & (mask << i)) == val)
12757 return 1;
12759 return 0;
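/* For example, 0x00ff0000 (0xff << 16) is accepted, while zero and
   0x00100100 (whose set bits do not fit in one 8-bit window) are
   rejected.  An accepted constant can be built with an 8-bit MOV
   followed by a left shift.  */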
12762 /* Returns nonzero if the current function contains,
12763 or might contain, a far jump. */
12764 static int
12765 thumb_far_jump_used_p (void)
12767 rtx insn;
12769 /* This test is only important for leaf functions. */
12770 /* assert (!leaf_function_p ()); */
12772 /* If we have already decided that far jumps may be used,
12773 do not bother checking again, and always return true even if
12774 it turns out that they are not being used. Once we have made
12775 the decision that far jumps are present (and that hence the link
12776 register will be pushed onto the stack) we cannot go back on it. */
12777 if (cfun->machine->far_jump_used)
12778 return 1;
12780 /* If this function is not being called from the prologue/epilogue
12781 generation code then it must be being called from the
12782 INITIAL_ELIMINATION_OFFSET macro. */
12783 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12785 /* In this case we know that we are being asked about the elimination
12786 of the arg pointer register. If that register is not being used,
12787 then there are no arguments on the stack, and we do not have to
12788 worry that a far jump might force the prologue to push the link
12789 register, changing the stack offsets. In this case we can just
12790 return false, since the presence of far jumps in the function will
12791 not affect stack offsets.
12793 If the arg pointer is live (or if it was live, but has now been
12794 eliminated and so set to dead) then we do have to test to see if
12795 the function might contain a far jump. This test can lead to some
12796 false positives, since before reload is completed the length of
12797 branch instructions is not known, so gcc defaults to returning their
12798 longest length, which in turn sets the far jump attribute to true.
12800 A false positive will not result in bad code being generated, but it
12801 will result in a needless push and pop of the link register. We
12802 hope that this does not occur too often.
12804 If we need doubleword stack alignment this could affect the other
12805 elimination offsets so we can't risk getting it wrong. */
12806 if (regs_ever_live [ARG_POINTER_REGNUM])
12807 cfun->machine->arg_pointer_live = 1;
12808 else if (!cfun->machine->arg_pointer_live)
12809 return 0;
12812 /* Check to see if the function contains a branch
12813 insn with the far jump attribute set. */
12814 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12816 if (GET_CODE (insn) == JUMP_INSN
12817 /* Ignore tablejump patterns. */
12818 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12819 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12820 && get_attr_far_jump (insn) == FAR_JUMP_YES
12823 /* Record the fact that we have decided that
12824 the function does use far jumps. */
12825 cfun->machine->far_jump_used = 1;
12826 return 1;
12830 return 0;
12833 /* Return nonzero if FUNC must be entered in ARM mode. */
12834 int
12835 is_called_in_ARM_mode (tree func)
12837 if (TREE_CODE (func) != FUNCTION_DECL)
12838 abort ();
12840 /* Ignore the problem about functions whose address is taken. */
12841 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12842 return TRUE;
12844 #ifdef ARM_PE
12845 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12846 #else
12847 return FALSE;
12848 #endif
12851 /* The bits which aren't usefully expanded as rtl. */
12852 const char *
12853 thumb_unexpanded_epilogue (void)
12855 int regno;
12856 int live_regs_mask = 0;
12857 int high_regs_pushed = 0;
12858 int leaf_function = leaf_function_p ();
12859 int had_to_push_lr;
12860 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
12862 if (return_used_this_function)
12863 return "";
12865 if (IS_NAKED (arm_current_func_type ()))
12866 return "";
12868 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12869 if (THUMB_REG_PUSHED_P (regno))
12870 live_regs_mask |= 1 << regno;
12872 for (regno = 8; regno < 13; regno++)
12873 if (THUMB_REG_PUSHED_P (regno))
12874 high_regs_pushed++;
12876 /* The prologue may have pushed some high registers to use as
12877 work registers. e.g. the testsuite file:
12878 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12879 compiles to produce:
12880 push {r4, r5, r6, r7, lr}
12881 mov r7, r9
12882 mov r6, r8
12883 push {r6, r7}
12884 as part of the prologue. We have to undo that pushing here. */
12886 if (high_regs_pushed)
12888 int mask = live_regs_mask;
12889 int next_hi_reg;
12890 int size;
12891 int mode;
12893 #ifdef RTX_CODE
12894 /* Where possible, deduce the registers used from the function's return
12895 value. This is more reliable than examining regs_ever_live[] because that
12896 will be set if the register is ever used in the function, not just if
12897 the register is used to hold a return value. */
12899 if (current_function_return_rtx != 0)
12900 mode = GET_MODE (current_function_return_rtx);
12901 else
12902 #endif
12903 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12905 size = GET_MODE_SIZE (mode);
12907 /* Unless we are returning a type of size > 12, register r3 is
12908 available. */
12909 if (size < 13)
12910 mask |= 1 << 3;
12912 if (mask == 0)
12913 /* Oh dear! We have no low registers into which we can pop
12914 high registers! */
12915 internal_error
12916 ("no low registers available for popping high registers");
12918 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12919 if (THUMB_REG_PUSHED_P (next_hi_reg))
12920 break;
12922 while (high_regs_pushed)
12924 /* Find lo register(s) into which the high register(s) can
12925 be popped. */
12926 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12928 if (mask & (1 << regno))
12929 high_regs_pushed--;
12930 if (high_regs_pushed == 0)
12931 break;
12934 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12936 /* Pop the values into the low register(s). */
12937 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12939 /* Move the value(s) into the high registers. */
12940 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12942 if (mask & (1 << regno))
12944 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12945 regno);
12947 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12948 if (THUMB_REG_PUSHED_P (next_hi_reg))
12949 break;
12955 had_to_push_lr = (live_regs_mask || !leaf_function
12956 || thumb_far_jump_used_p ());
12958 if (TARGET_BACKTRACE
12959 && ((live_regs_mask & 0xFF) == 0)
12960 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12962 /* The stack backtrace structure creation code had to
12963 push R7 in order to get a work register, so we pop
12964 it now. */
12965 live_regs_mask |= (1 << LAST_LO_REGNUM);
12968 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12970 if (had_to_push_lr
12971 && !is_called_in_ARM_mode (current_function_decl)
12972 && !eh_ofs)
12973 live_regs_mask |= 1 << PC_REGNUM;
12975 /* Either no argument registers were pushed or a backtrace
12976 structure was created which includes an adjusted stack
12977 pointer, so just pop everything. */
12978 if (live_regs_mask)
12979 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12980 live_regs_mask);
12982 if (eh_ofs)
12983 thumb_exit (asm_out_file, 2, eh_ofs);
12984 /* We have either just popped the return address into the
12985 PC, or it was kept in LR for the entire function, or
12986 it is still on the stack because we do not want to
12987 return by doing a pop {pc}. */
12988 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12989 thumb_exit (asm_out_file,
12990 (had_to_push_lr
12991 && is_called_in_ARM_mode (current_function_decl)) ?
12992 -1 : LR_REGNUM, NULL_RTX);
12994 else
12996 /* Pop everything but the return address. */
12997 live_regs_mask &= ~(1 << PC_REGNUM);
12999 if (live_regs_mask)
13000 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13001 live_regs_mask);
13003 if (had_to_push_lr)
13004 /* Get the return address into a temporary register. */
13005 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13006 1 << LAST_ARG_REGNUM);
13008 /* Remove the argument registers that were pushed onto the stack. */
13009 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13010 SP_REGNUM, SP_REGNUM,
13011 current_function_pretend_args_size);
13013 if (eh_ofs)
13014 thumb_exit (asm_out_file, 2, eh_ofs);
13015 else
13016 thumb_exit (asm_out_file,
13017 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
13020 return "";
13023 /* Functions to save and restore machine-specific function data. */
13024 static struct machine_function *
13025 arm_init_machine_status (void)
13027 struct machine_function *machine;
13028 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13030 #if ARM_FT_UNKNOWN != 0
13031 machine->func_type = ARM_FT_UNKNOWN;
13032 #endif
13033 return machine;
13036 /* Return an RTX indicating where the return address to the
13037 calling function can be found. */
13038 rtx
13039 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13041 if (count != 0)
13042 return NULL_RTX;
13044 if (TARGET_APCS_32)
13045 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13046 else
13048 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
13049 GEN_INT (RETURN_ADDR_MASK26));
13050 return get_func_hard_reg_initial_val (cfun, lr);
13054 /* Do anything needed before RTL is emitted for each function. */
13055 void
13056 arm_init_expanders (void)
13058 /* Arrange to initialize and mark the machine per-function status. */
13059 init_machine_status = arm_init_machine_status;
13063 /* Like arm_compute_initial_elimination_offset. Simpler because
13064 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13066 HOST_WIDE_INT
13067 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13069 arm_stack_offsets *offsets;
13071 offsets = arm_get_frame_offsets ();
13073 switch (from)
13075 case ARG_POINTER_REGNUM:
13076 switch (to)
13078 case STACK_POINTER_REGNUM:
13079 return offsets->outgoing_args - offsets->saved_args;
13081 case FRAME_POINTER_REGNUM:
13082 return offsets->soft_frame - offsets->saved_args;
13084 case THUMB_HARD_FRAME_POINTER_REGNUM:
13085 case ARM_HARD_FRAME_POINTER_REGNUM:
13086 return offsets->saved_regs - offsets->saved_args;
13088 default:
13089 abort ();
13091 break;
13093 case FRAME_POINTER_REGNUM:
13094 switch (to)
13096 case STACK_POINTER_REGNUM:
13097 return offsets->outgoing_args - offsets->soft_frame;
13099 case THUMB_HARD_FRAME_POINTER_REGNUM:
13100 case ARM_HARD_FRAME_POINTER_REGNUM:
13101 return offsets->saved_regs - offsets->soft_frame;
13103 default:
13104 abort ();
13106 break;
13108 default:
13109 abort ();
13114 /* Generate the rest of a function's prologue. */
13115 void
13116 thumb_expand_prologue (void)
13118 rtx insn, dwarf;
13120 HOST_WIDE_INT amount;
13121 arm_stack_offsets *offsets;
13122 unsigned long func_type;
13124 func_type = arm_current_func_type ();
13126 /* Naked functions don't have prologues. */
13127 if (IS_NAKED (func_type))
13128 return;
13130 if (IS_INTERRUPT (func_type))
13132 error ("interrupt service routines cannot be coded in Thumb mode");
13133 return;
13136 offsets = arm_get_frame_offsets ();
13138 if (frame_pointer_needed)
13140 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13141 stack_pointer_rtx));
13142 RTX_FRAME_RELATED_P (insn) = 1;
13145 amount = offsets->outgoing_args - offsets->saved_regs;
13146 if (amount)
13148 if (amount < 512)
13150 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13151 GEN_INT (- amount)));
13152 RTX_FRAME_RELATED_P (insn) = 1;
13154 else
13156 int regno;
13157 rtx reg;
13159 /* The stack decrement is too big for an immediate value in a single
13160 insn. In theory we could issue multiple subtracts, but after
13161 three of them it becomes more space efficient to place the full
13162 value in the constant pool and load into a register. (Also the
13163 ARM debugger really likes to see only one stack decrement per
13164 function). So instead we look for a scratch register into which
13165 we can load the decrement, and then we subtract this from the
13166 stack pointer. Unfortunately on the thumb the only available
13167 scratch registers are the argument registers, and we cannot use
13168 these as they may hold arguments to the function. Instead we
13169 attempt to locate a call preserved register which is used by this
13170 function. If we can find one, then we know that it will have
13171 been pushed at the start of the prologue and so we can corrupt
13172 it now. */
13173 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13174 if (THUMB_REG_PUSHED_P (regno)
13175 && !(frame_pointer_needed
13176 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13177 break;
13179 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13181 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13183 /* Choose an arbitrary, non-argument low register. */
13184 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13186 /* Save it by copying it into a high, scratch register. */
13187 emit_insn (gen_movsi (spare, reg));
13188 /* Add a USE to stop propagate_one_insn() from barfing. */
13189 emit_insn (gen_prologue_use (spare));
13191 /* Decrement the stack. */
13192 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13193 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13194 stack_pointer_rtx, reg));
13195 RTX_FRAME_RELATED_P (insn) = 1;
13196 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13197 plus_constant (stack_pointer_rtx,
13198 GEN_INT (- amount)));
13199 RTX_FRAME_RELATED_P (dwarf) = 1;
13200 REG_NOTES (insn)
13201 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13202 REG_NOTES (insn));
13204 /* Restore the low register's original value. */
13205 emit_insn (gen_movsi (reg, spare));
13207 /* Emit a USE of the restored scratch register, so that flow
13208 analysis will not consider the restore redundant. The
13209 register won't be used again in this function and isn't
13210 restored by the epilogue. */
13211 emit_insn (gen_prologue_use (reg));
13213 else
13215 reg = gen_rtx_REG (SImode, regno);
13217 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13219 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13220 stack_pointer_rtx, reg));
13221 RTX_FRAME_RELATED_P (insn) = 1;
13222 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13223 plus_constant (stack_pointer_rtx,
13224 GEN_INT (- amount)));
13225 RTX_FRAME_RELATED_P (dwarf) = 1;
13226 REG_NOTES (insn)
13227 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13228 REG_NOTES (insn));
13231 /* If the frame pointer is needed, emit a special barrier that
13232 will prevent the scheduler from moving stores to the frame
13233 before the stack adjustment. */
13234 if (frame_pointer_needed)
13235 emit_insn (gen_stack_tie (stack_pointer_rtx,
13236 hard_frame_pointer_rtx));
13239 if (current_function_profile || TARGET_NO_SCHED_PRO)
13240 emit_insn (gen_blockage ());
13243 void
13244 thumb_expand_epilogue (void)
13246 HOST_WIDE_INT amount;
13247 arm_stack_offsets *offsets;
13248 int regno;
13251 /* Naked functions don't have epilogues. */
13251 if (IS_NAKED (arm_current_func_type ()))
13252 return;
13254 offsets = arm_get_frame_offsets ();
13255 amount = offsets->outgoing_args - offsets->saved_regs;
13257 if (frame_pointer_needed)
13258 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13259 else if (amount)
13261 if (amount < 512)
13262 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13263 GEN_INT (amount)));
13264 else
13266 /* r3 is always free in the epilogue. */
13267 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13269 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13270 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13274 /* Emit a USE (stack_pointer_rtx), so that
13275 the stack adjustment will not be deleted. */
13276 emit_insn (gen_prologue_use (stack_pointer_rtx));
13278 if (current_function_profile || TARGET_NO_SCHED_PRO)
13279 emit_insn (gen_blockage ());
13281 /* Emit a clobber for each insn that will be restored in the epilogue,
13282 so that flow2 will get register lifetimes correct. */
13283 for (regno = 0; regno < 13; regno++)
13284 if (regs_ever_live[regno] && !call_used_regs[regno])
13285 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13287 if (! regs_ever_live[LR_REGNUM])
13288 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13291 static void
13292 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13294 int live_regs_mask = 0;
13295 int high_regs_pushed = 0;
13296 int cfa_offset = 0;
13297 int regno;
13299 if (IS_NAKED (arm_current_func_type ()))
13300 return;
13302 if (is_called_in_ARM_mode (current_function_decl))
13304 const char * name;
13306 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13307 abort ();
13308 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13309 abort ();
13310 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13312 /* Generate code sequence to switch us into Thumb mode. */
13313 /* The .code 32 directive has already been emitted by
13314 ASM_DECLARE_FUNCTION_NAME. */
13315 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13316 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13318 /* Generate a label, so that the debugger will notice the
13319 change in instruction sets. This label is also used by
13320 the assembler to bypass the ARM code when this function
13321 is called from a Thumb encoded function elsewhere in the
13322 same file. Hence the definition of STUB_NAME here must
13323 agree with the definition in gas/config/tc-arm.c. */
13325 #define STUB_NAME ".real_start_of"
13327 fprintf (f, "\t.code\t16\n");
13328 #ifdef ARM_PE
13329 if (arm_dllexport_name_p (name))
13330 name = arm_strip_name_encoding (name);
13331 #endif
13332 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13333 fprintf (f, "\t.thumb_func\n");
13334 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13337 if (current_function_pretend_args_size)
13339 if (cfun->machine->uses_anonymous_args)
13341 int num_pushes;
13343 fprintf (f, "\tpush\t{");
13345 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13347 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13348 regno <= LAST_ARG_REGNUM;
13349 regno++)
13350 asm_fprintf (f, "%r%s", regno,
13351 regno == LAST_ARG_REGNUM ? "" : ", ");
13353 fprintf (f, "}\n");
13355 else
13356 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13357 SP_REGNUM, SP_REGNUM,
13358 current_function_pretend_args_size);
13360 /* We don't need to record the stores for unwinding (would it
13361 help the debugger any if we did?), but record the change in
13362 the stack pointer. */
13363 if (dwarf2out_do_frame ())
13365 char *l = dwarf2out_cfi_label ();
13366 cfa_offset = cfa_offset + current_function_pretend_args_size;
13367 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13371 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13372 if (THUMB_REG_PUSHED_P (regno))
13373 live_regs_mask |= 1 << regno;
13375 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p ())
13376 live_regs_mask |= 1 << LR_REGNUM;
13378 if (TARGET_BACKTRACE)
13380 int offset;
13381 int work_register = 0;
13382 int wr;
13384 /* We have been asked to create a stack backtrace structure.
13385 The code looks like this:
13387 0 .align 2
13388 0 func:
13389 0 sub SP, #16 Reserve space for 4 registers.
13390 2 push {R7} Get a work register.
13391 4 add R7, SP, #20 Get the stack pointer before the push.
13392 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13393 8 mov R7, PC Get hold of the start of this code plus 12.
13394 10 str R7, [SP, #16] Store it.
13395 12 mov R7, FP Get hold of the current frame pointer.
13396 14 str R7, [SP, #4] Store it.
13397 16 mov R7, LR Get hold of the current return address.
13398 18 str R7, [SP, #12] Store it.
13399 20 add R7, SP, #16 Point at the start of the backtrace structure.
13400 22 mov FP, R7 Put this value into the frame pointer. */
13402 if ((live_regs_mask & 0xFF) == 0)
13404 /* See if the a4 register is free. */
13406 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
13407 work_register = LAST_ARG_REGNUM;
13408 else /* We must push a register of our own. */
13409 live_regs_mask |= (1 << LAST_LO_REGNUM);
13412 if (work_register == 0)
13414 /* Select a register from the list that will be pushed to
13415 use as our work register. */
13416 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
13417 if ((1 << work_register) & live_regs_mask)
13418 break;
13421 asm_fprintf
13422 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13423 SP_REGNUM, SP_REGNUM);
13425 if (dwarf2out_do_frame ())
13427 char *l = dwarf2out_cfi_label ();
13428 cfa_offset = cfa_offset + 16;
13429 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13432 if (live_regs_mask)
13433 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13435 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
13436 if (wr & live_regs_mask)
13437 offset += 4;
13439 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13440 offset + 16 + current_function_pretend_args_size);
13442 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13443 offset + 4);
13445 /* Make sure that the instruction fetching the PC is in the right place
13446 to calculate "start of backtrace creation code + 12". */
13447 if (live_regs_mask)
13449 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13450 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13451 offset + 12);
13452 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13453 ARM_HARD_FRAME_POINTER_REGNUM);
13454 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13455 offset);
13457 else
13459 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13460 ARM_HARD_FRAME_POINTER_REGNUM);
13461 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13462 offset);
13463 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13464 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13465 offset + 12);
13468 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13469 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13470 offset + 8);
13471 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13472 offset + 12);
13473 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13474 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13476 else if (live_regs_mask)
13477 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13479 for (regno = 8; regno < 13; regno++)
13480 if (THUMB_REG_PUSHED_P (regno))
13481 high_regs_pushed++;
13483 if (high_regs_pushed)
13485 int pushable_regs = 0;
13486 int mask = live_regs_mask & 0xff;
13487 int next_hi_reg;
13489 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13490 if (THUMB_REG_PUSHED_P (next_hi_reg))
13491 break;
13493 pushable_regs = mask;
13495 if (pushable_regs == 0)
13497 /* Desperation time -- this probably will never happen. */
13498 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
13499 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13500 mask = 1 << LAST_ARG_REGNUM;
13503 while (high_regs_pushed > 0)
13505 int real_regs_mask = 0;
13507 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13509 if (mask & (1 << regno))
13511 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13513 high_regs_pushed--;
13514 real_regs_mask |= (1 << next_hi_reg);
13516 if (high_regs_pushed)
13518 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13519 next_hi_reg--)
13520 if (THUMB_REG_PUSHED_P (next_hi_reg))
13521 break;
13523 else
13525 mask &= ~((1 << regno) - 1);
13526 break;
13531 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
13534 if (pushable_regs == 0
13535 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
13536 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13540 /* Handle the case of a double word load into a low register from
13541 a computed memory address. The computed address may involve a
13542 register which is overwritten by the load. */
13543 const char *
13544 thumb_load_double_from_address (rtx *operands)
13546 rtx addr;
13547 rtx base;
13548 rtx offset;
13549 rtx arg1;
13550 rtx arg2;
13552 if (GET_CODE (operands[0]) != REG)
13553 abort ();
13555 if (GET_CODE (operands[1]) != MEM)
13556 abort ();
13558 /* Get the memory address. */
13559 addr = XEXP (operands[1], 0);
13561 /* Work out how the memory address is computed. */
13562 switch (GET_CODE (addr))
13564 case REG:
13565 operands[2] = gen_rtx_MEM (SImode,
13566 plus_constant (XEXP (operands[1], 0), 4));
13568 if (REGNO (operands[0]) == REGNO (addr))
13570 output_asm_insn ("ldr\t%H0, %2", operands);
13571 output_asm_insn ("ldr\t%0, %1", operands);
13573 else
13575 output_asm_insn ("ldr\t%0, %1", operands);
13576 output_asm_insn ("ldr\t%H0, %2", operands);
13578 break;
13580 case CONST:
13581 /* Compute <address> + 4 for the high order load. */
13582 operands[2] = gen_rtx_MEM (SImode,
13583 plus_constant (XEXP (operands[1], 0), 4));
13585 output_asm_insn ("ldr\t%0, %1", operands);
13586 output_asm_insn ("ldr\t%H0, %2", operands);
13587 break;
13589 case PLUS:
13590 arg1 = XEXP (addr, 0);
13591 arg2 = XEXP (addr, 1);
13593 if (CONSTANT_P (arg1))
13594 base = arg2, offset = arg1;
13595 else
13596 base = arg1, offset = arg2;
13598 if (GET_CODE (base) != REG)
13599 abort ();
13601 /* Catch the case of <address> = <reg> + <reg> */
13602 if (GET_CODE (offset) == REG)
13604 int reg_offset = REGNO (offset);
13605 int reg_base = REGNO (base);
13606 int reg_dest = REGNO (operands[0]);
13608 /* Add the base and offset registers together into the
13609 higher destination register. */
13610 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13611 reg_dest + 1, reg_base, reg_offset);
13613 /* Load the lower destination register from the address in
13614 the higher destination register. */
13615 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13616 reg_dest, reg_dest + 1);
13618 /* Load the higher destination register from its own address
13619 plus 4. */
13620 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13621 reg_dest + 1, reg_dest + 1);
13623 else
13625 /* Compute <address> + 4 for the high order load. */
13626 operands[2] = gen_rtx_MEM (SImode,
13627 plus_constant (XEXP (operands[1], 0), 4));
13629 /* If the computed address is held in the low order register
13630 then load the high order register first, otherwise always
13631 load the low order register first. */
13632 if (REGNO (operands[0]) == REGNO (base))
13634 output_asm_insn ("ldr\t%H0, %2", operands);
13635 output_asm_insn ("ldr\t%0, %1", operands);
13637 else
13639 output_asm_insn ("ldr\t%0, %1", operands);
13640 output_asm_insn ("ldr\t%H0, %2", operands);
13643 break;
13645 case LABEL_REF:
13646 /* With no registers to worry about we can just load the value
13647 directly. */
13648 operands[2] = gen_rtx_MEM (SImode,
13649 plus_constant (XEXP (operands[1], 0), 4));
13651 output_asm_insn ("ldr\t%H0, %2", operands);
13652 output_asm_insn ("ldr\t%0, %1", operands);
13653 break;
13655 default:
13656 abort ();
13657 break;
13660 return "";
13663 const char *
13664 thumb_output_move_mem_multiple (int n, rtx *operands)
13666 rtx tmp;
13668 switch (n)
13670 case 2:
13671 if (REGNO (operands[4]) > REGNO (operands[5]))
13673 tmp = operands[4];
13674 operands[4] = operands[5];
13675 operands[5] = tmp;
13677 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13678 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13679 break;
13681 case 3:
13682 if (REGNO (operands[4]) > REGNO (operands[5]))
13684 tmp = operands[4];
13685 operands[4] = operands[5];
13686 operands[5] = tmp;
13688 if (REGNO (operands[5]) > REGNO (operands[6]))
13690 tmp = operands[5];
13691 operands[5] = operands[6];
13692 operands[6] = tmp;
13694 if (REGNO (operands[4]) > REGNO (operands[5]))
13696 tmp = operands[4];
13697 operands[4] = operands[5];
13698 operands[5] = tmp;
13701 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13702 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13703 break;
13705 default:
13706 abort ();
13709 return "";
13712 /* Routines for generating rtl. */
13713 void
13714 thumb_expand_movstrqi (rtx *operands)
13716 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13717 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13718 HOST_WIDE_INT len = INTVAL (operands[2]);
13719 HOST_WIDE_INT offset = 0;
13721 while (len >= 12)
13723 emit_insn (gen_movmem12b (out, in, out, in));
13724 len -= 12;
13727 if (len >= 8)
13729 emit_insn (gen_movmem8b (out, in, out, in));
13730 len -= 8;
13733 if (len >= 4)
13735 rtx reg = gen_reg_rtx (SImode);
13736 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13737 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13738 len -= 4;
13739 offset += 4;
13742 if (len >= 2)
13744 rtx reg = gen_reg_rtx (HImode);
13745 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13746 plus_constant (in, offset))));
13747 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13748 reg));
13749 len -= 2;
13750 offset += 2;
13753 if (len)
13755 rtx reg = gen_reg_rtx (QImode);
13756 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13757 plus_constant (in, offset))));
13758 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13759 reg));
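/* Worked example (a sketch): a 23 byte copy emits one movmem12b
   (bytes 0-11), one movmem8b (bytes 12-19), then a halfword move for
   bytes 20-21 and a byte move for byte 22.  The block moves
   post-increment OUT and IN, while the scalar tail addresses via
   OFFSET.  */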
13763 int
13764 thumb_cmp_operand (rtx op, enum machine_mode mode)
13766 return ((GET_CODE (op) == CONST_INT
13767 && INTVAL (op) < 256
13768 && INTVAL (op) >= 0)
13769 || s_register_operand (op, mode));
13773 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
13775 return (GET_CODE (op) == CONST_INT
13776 && INTVAL (op) < 0
13777 && INTVAL (op) > -256);
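
/* For illustration: the two predicates split small constants into the
   range a Thumb compare-immediate can encode directly (0..255) and the
   strictly negative range (-255..-1) that the negated-comparison
   patterns handle.  A standalone model of the two range tests:  */
#if 0
static int
cmp_imm_ok (int val)
{
  return val >= 0 && val < 256;	/* direct 8-bit compare immediate */
}

static int
cmpneg_imm_ok (int val)
{
  return val < 0 && val > -256;	/* comparison done via the negation */
}
#endif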

/* Return TRUE if a result can be stored in OP without clobbering the
   condition code register.  Prior to reload we only accept a
   register.  After reload we have to be able to handle memory as
   well, since a pseudo may not get a hard reg and reload cannot
   handle output-reloads on jump insns.

   We could possibly handle mem before reload as well, but that might
   complicate things with the need to handle increment
   side-effects.  */

int
thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
{
  return (s_register_operand (op, mode)
	  || ((reload_in_progress || reload_completed)
	      && memory_operand (op, mode)));
}

/* Handle storing a half-word to memory during reload.  */
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}

/* Return the length of a function name prefix
   that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
    default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
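
/* For illustration: the stripping loop repeatedly asks the length
   function how many characters the current leading character accounts
   for, and skips them until no encoding prefix remains.  A standalone
   model with a hypothetical prefix table (the real table comes from
   ARM_NAME_ENCODING_LENGTHS):  */
#if 0
static int
strip_length (int c)
{
  switch (c)
    {
    case '*': return 1;		/* hypothetical: verbatim marker */
    case '#': return 1;		/* hypothetical: call-type flag */
    default:  return 0;
    }
}

static const char *
strip_encoding (const char *name)
{
  int skip;
  while ((skip = strip_length (*name)))
    name += skip;
  return name;			/* "*#foo" -> "foo" under this table */
}
#endif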

/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}

rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);

  return plus_constant (aof_pic_label, offset);
}
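
/* For illustration: each new symbol appended to the chain lands one
   4-byte DCD slot further into the x$adcons pool, so the returned
   address is always aof_pic_label plus 4 * position.  A standalone
   model of the walk-and-append, assuming interned strings that compare
   equal by pointer (as XSTR names do here):  */
#if 0
struct chain { struct chain *next; const char *symname; };

static int
pool_offset (struct chain **head, const char *sym)
{
  struct chain **chainp;
  int offset;

  for (offset = 0, chainp = head; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == sym)
      return offset;			/* already pooled */

  *chainp = (struct chain *) malloc (sizeof (struct chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = sym;
  return offset;			/* first use: next free slot */
}
#endif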

void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}

int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}

/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}
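
/* For illustration: aof_delete_import uses the classic pointer-to-pointer
   idiom, so unlinking the head of the list needs no special case.  A
   standalone model (pointer comparison again assumes interned names):  */
#if 0
struct node { struct node *next; const char *name; };

static void
delete_named (struct node **list, const char *name)
{
  struct node **old;

  for (old = list; *old; old = &(*old)->next)
    if ((*old)->name == name)
      {
	*old = (*old)->next;	/* rewrites the predecessor's link,
				   or the list head itself */
	return;
      }
}
#endif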

int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}

static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}

static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */

#ifdef OBJECT_FORMAT_ELF
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.

   Differs from the default elf version only in the prefix character
   used before the section type.  */

static void
arm_elf_asm_named_section (const char *name, unsigned int flags)
{
  char flagchars[10], *f = flagchars;

  if (! named_section_first_declaration (name))
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  if (!(flags & SECTION_DEBUG))
    *f++ = 'a';
  if (flags & SECTION_WRITE)
    *f++ = 'w';
  if (flags & SECTION_CODE)
    *f++ = 'x';
  if (flags & SECTION_SMALL)
    *f++ = 's';
  if (flags & SECTION_MERGE)
    *f++ = 'M';
  if (flags & SECTION_STRINGS)
    *f++ = 'S';
  if (flags & SECTION_TLS)
    *f++ = 'T';
  *f = '\0';

  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);

  if (!(flags & SECTION_NOTYPE))
    {
      const char *type;

      if (flags & SECTION_BSS)
	type = "nobits";
      else
	type = "progbits";

      fprintf (asm_out_file, ",%%%s", type);

      if (flags & SECTION_ENTSIZE)
	fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
    }

  putc ('\n', asm_out_file);
}
#endif
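
/* For illustration: a typical allocated, writable section therefore
   comes out as

	.section	.example,"aw",%progbits

   (hypothetical section name); the '%' before the type is the
   ARM-specific difference noted above, since '@' introduces a comment
   in ARM assembler syntax.  A minimal standalone model of the
   flag-string assembly:  */
#if 0
static void
section_flag_string (int alloc, int write, int code, char *out)
{
  char *f = out;
  if (alloc) *f++ = 'a';
  if (write) *f++ = 'w';
  if (code)  *f++ = 'x';
  *f = '\0';			/* e.g. alloc + write -> "aw" */
}
#endif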

#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */

static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}

/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
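
/* For illustration: the delta loop above emits one add/sub per 8-bit
   chunk, each chunk starting at an even bit position, matching the
   rotated 8-bit form that ARM add/sub immediates can encode.  A
   standalone model of the decomposition:  */
#if 0
static void
emit_delta_chunks (unsigned int delta)	/* already made non-negative */
{
  int shift = 0;
  while (delta != 0)
    {
      if ((delta & (3 << shift)) == 0)
	shift += 2;			/* skip empty 2-bit groups */
      else
	{
	  /* one add/sub of (delta & (0xff << shift)) */
	  delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  /* e.g. delta == 0x401 -> add #1, then add #0x400.  */
}
#endif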

int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
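
/* For illustration: elements are printed from the highest index down,
   so the highest-numbered element ends up leftmost (most significant)
   in the emitted literal.  A standalone model for a V4HI-style vector
   with hypothetical element values:  */
#if 0
#include <stdio.h>

static void
print_vector (FILE *f, const unsigned short *elt, int nunits)
{
  int i;
  fprintf (f, "0x");
  for (i = nunits; i--;)
    fprintf (f, "%04x", elt[i]);
  /* {0x1111, 0x2222, 0x3333, 0x4444} -> 0x4444333322221111 */
}
#endif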

const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}

static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
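
/* For illustration: with the four ARM argument registers (r0-r3) and
   4-byte words, a varargs function whose named arguments consume two
   registers gets a pretend size of (4 - 2) * 4 = 8 bytes, i.e. the
   prologue pushes r2 and r3 so the anonymous arguments sit contiguous
   with those already on the stack.  A standalone model:  */
#if 0
static int
pretend_bytes (int nregs_used)
{
  const int num_arg_regs = 4;		/* NUM_ARG_REGS on ARM */
  const int units_per_word = 4;		/* UNITS_PER_WORD on ARM */
  return nregs_used < num_arg_regs
	 ? (num_arg_regs - nregs_used) * units_per_word
	 : 0;
}
#endif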

/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
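
/* For illustration: all four scheduling helpers above share the same
   unwrapping preamble before the overlap test -- strip a COND_EXEC
   wrapper, take the first element of a PARALLEL, then pick the
   destination (XEXP 0) of the producer's SET or the source (XEXP 1) of
   the consumer's.  A condensed model of that preamble, assuming GCC's
   rtx accessors:  */
#if 0
static rtx
unwrap_pattern (rtx pat, int want_src)
{
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);		/* look inside predication */
  if (GET_CODE (pat) == PARALLEL)
    pat = XVECEXP (pat, 0, 0);		/* first element is the SET */
  /* XEXP (set, 0) is the destination, XEXP (set, 1) the source.  */
  return XEXP (pat, want_src ? 1 : 0);
}
#endif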