/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int, rtx);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int arm_use_dfa_pipeline_interface (void);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Set by the -mabi=... option.  */
const char * target_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int    arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */
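/* A processor's capability word is the inclusive OR of these bits;
   e.g. the "armv4t" entry in all_architectures below uses
   FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB.  */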
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, FLAGS, COSTS) \
  {#NAME, arm_none, FLAGS, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, 0, NULL}
};
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  { "armv2",   arm2,       FL_CO_PROC | FL_MODE26, NULL },
  { "armv2a",  arm2,       FL_CO_PROC | FL_MODE26, NULL },
  { "armv3",   arm6,       FL_CO_PROC | FL_MODE26 | FL_MODE32, NULL },
  { "armv3m",  arm7m,      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M, NULL },
  { "armv4",   arm7tdmi,   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M | FL_ARCH4, NULL },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",  arm7tdmi,   FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB, NULL },
  { "armv5",   arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5, NULL },
  { "armv5t",  arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5, NULL },
  { "armv5te", arm1026ejs, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E, NULL },
  { "armv6",   arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6, NULL },
  { "armv6j",  arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6, NULL },
  { "ep9312",  ep9312,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS, NULL },
  { "iwmmxt",  iwmmxt,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT, NULL },
  { NULL,      arm_none,   0, NULL }
};
/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",  ARM_ABI_APCS},
  {"atpcs",     ARM_ABI_ATPCS},
  {"aapcs",     ARM_ABI_AAPCS},
  {"iwmmxt",    ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
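/* The loop above runs once per set bit rather than once per bit
   position: e.g. for value == 0x29 (binary 101001) it iterates three
   times (0x29 -> 0x28 -> 0x20 -> 0) and returns 3.  */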
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      static const struct cpu_default
      {
        const int cpu;
        const char *const name;
      }
      cpu_defaults[] =
      {
        { TARGET_CPU_arm2,      "arm2" },
        { TARGET_CPU_arm6,      "arm6" },
        { TARGET_CPU_arm610,    "arm610" },
        { TARGET_CPU_arm710,    "arm710" },
        { TARGET_CPU_arm7m,     "arm7m" },
        { TARGET_CPU_arm7500fe, "arm7500fe" },
        { TARGET_CPU_arm7tdmi,  "arm7tdmi" },
        { TARGET_CPU_arm8,      "arm8" },
        { TARGET_CPU_arm810,    "arm810" },
        { TARGET_CPU_arm9,      "arm9" },
        { TARGET_CPU_strongarm, "strongarm" },
        { TARGET_CPU_xscale,    "xscale" },
        { TARGET_CPU_ep9312,    "ep9312" },
        { TARGET_CPU_iwmmxt,    "iwmmxt" },
        { TARGET_CPU_arm926ejs, "arm926ejs" },
        { TARGET_CPU_arm1026ejs, "arm1026ejs" },
        { TARGET_CPU_arm1136js, "arm1136js" },
        { TARGET_CPU_arm1136jfs, "arm1136jfs" },
        { TARGET_CPU_generic,   "arm" },
        { 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
        abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
        if (streq (def->name, sel->name))
          break;

      if (sel->name == NULL)
        abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that requires certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* Force apcs-32 to be used for interworking.  */
          target_flags |= ARM_FLAG_APCS_32;

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }
      else if (!TARGET_APCS_32)
        sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 Unfortunately this does not work with multilibing.  We
                 need to be able to support multilibs for -mapcs-26 and for
                 -mthumb-interwork and there is no CPU that can support both
                 options.  Instead if we cannot find a cpu that has both the
                 characteristics of the default cpu and the given command line
                 options we scan the array again looking for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
         user, so issue a warning message.  If the user has specified
         "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
        warning ("target CPU does not support APCS-32");
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
        warning ("interworking forces APCS-32 to be used");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;
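  /* Under the APCS, r9 is the static base register (sb) and r10 the
     stack limit register (sl); -mapcs-stack-check claims r10, which is
     why stack checking pushes the PIC register down to r9.  */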
  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      if ((insn_flags & FL_VFPV2) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else if (insn_flags & FL_CIRRUS)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
    sorry ("-mfloat-abi=softfp");
  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64" : "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const   arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
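/* For example, a handler declared as

     void handler (void) __attribute__ ((interrupt ("IRQ")));

   matches the "IRQ" entry above and is compiled as ARM_FT_ISR, so the
   prologue/epilogue code preserves the extra state an interrupt
   handler cannot clobber.  */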
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
        a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
        type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
        type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ()  */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if the function calls alloca  */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
          (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                         >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
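/* The do-while loop above implements the ARM immediate encoding rule:
   a valid immediate is an 8-bit value rotated right by an even number
   of bit positions.  Thus 0xff000000 (0xff rotated right by 8) and
   0x000003fc (0xff shifted left by 2) are valid, while 0x00000101 is
   not, because its set bits cannot fit in any even-rotated 8-bit
   window.  */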
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb  */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
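/* For example, (plus reg -66): -66 is not a valid immediate, but 66 is,
   so the PLUS case accepts it and the addition can ultimately be
   emitted as a sub with #66; similarly AND with an inverted valid
   constant can be emitted as bic.  */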
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode,
                                                        source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
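/* For instance, with arm_constant_limit == 1 a SET of 0x0000ffff would
   need two insns (mov #0xff00; orr #0xff), so the bare SET is emitted
   instead and arm_reorg can later turn it into a minipool load.  */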
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
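/* This mirrors the emission loop at the end of arm_gen_constant below:
   each iteration peels off up to eight bits, aligned on a two-bit
   boundary, starting just below position I, so the result is the
   number of data-processing insns that loop would emit.  */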
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
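  /* Example: for remainder == 0xfffffe00, set_sign_bit_copies == 23
     (bits 31..9 set) and clear_zero_bit_copies == 9 (bits 8..0 clear);
     the other two counts are zero.  */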
1623 switch (code)
1625 case SET:
1626 /* See if we can do this by sign_extending a constant that is known
1627 to be negative. This is a good, way of doing it, since the shift
1628 may well merge into a subsequent insn. */
1629 if (set_sign_bit_copies > 1)
1631 if (const_ok_for_arm
1632 (temp1 = ARM_SIGN_EXTEND (remainder
1633 << (set_sign_bit_copies - 1))))
1635 if (generate)
1637 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1638 emit_constant_insn (cond,
1639 gen_rtx_SET (VOIDmode, new_src,
1640 GEN_INT (temp1)));
1641 emit_constant_insn (cond,
1642 gen_ashrsi3 (target, new_src,
1643 GEN_INT (set_sign_bit_copies - 1)));
1645 return 2;
1647 /* For an inverted constant, we will need to set the low bits,
1648 these will be shifted out of harm's way. */
1649 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
1650 if (const_ok_for_arm (~temp1))
1652 if (generate)
1654 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1655 emit_constant_insn (cond,
1656 gen_rtx_SET (VOIDmode, new_src,
1657 GEN_INT (temp1)));
1658 emit_constant_insn (cond,
1659 gen_ashrsi3 (target, new_src,
1660 GEN_INT (set_sign_bit_copies - 1)));
1662 return 2;
1666 /* See if we can generate this by setting the bottom (or the top)
1667 16 bits, and then shifting these into the other half of the
1668 word. We only look for the simplest cases, to do more would cost
1669 too much. Be careful, however, not to generate this when the
1670 alternative would take fewer insns. */
1671 if (val & 0xffff0000)
1673 temp1 = remainder & 0xffff0000;
1674 temp2 = remainder & 0x0000ffff;
1676 /* Overlaps outside this range are best done using other methods. */
1677 for (i = 9; i < 24; i++)
1679 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
1680 && !const_ok_for_arm (temp2))
1682 rtx new_src = (subtargets
1683 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1684 : target);
1685 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
1686 source, subtargets, generate);
1687 source = new_src;
1688 if (generate)
1689 emit_constant_insn
1690 (cond,
1691 gen_rtx_SET
1692 (VOIDmode, target,
1693 gen_rtx_IOR (mode,
1694 gen_rtx_ASHIFT (mode, source,
1695 GEN_INT (i)),
1696 source)));
1697 return insns + 1;
1701 /* Don't duplicate cases already considered. */
1702 for (i = 17; i < 24; i++)
1704 if (((temp1 | (temp1 >> i)) == remainder)
1705 && !const_ok_for_arm (temp1))
1707 rtx new_src = (subtargets
1708 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
1709 : target);
1710 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
1711 source, subtargets, generate);
1712 source = new_src;
1713 if (generate)
1714 emit_constant_insn
1715 (cond,
1716 gen_rtx_SET (VOIDmode, target,
1717 gen_rtx_IOR
1718 (mode,
1719 gen_rtx_LSHIFTRT (mode, source,
1720 GEN_INT (i)),
1721 source)));
1722 return insns + 1;
1726 break;
1728 case IOR:
1729 case XOR:
1730 /* If we have IOR or XOR, and the constant can be loaded in a
1731 single instruction, and we can find a temporary to put it in,
1732 then this can be done in two instructions instead of 3-4. */
1733 if (subtargets
1734 /* TARGET can't be NULL if SUBTARGETS is 0 */
1735 || (reload_completed && !reg_mentioned_p (target, source)))
1737 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
1739 if (generate)
1741 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1743 emit_constant_insn (cond,
1744 gen_rtx_SET (VOIDmode, sub,
1745 GEN_INT (val)));
1746 emit_constant_insn (cond,
1747 gen_rtx_SET (VOIDmode, target,
1748 gen_rtx_fmt_ee (code, mode,
1749 source, sub)));
1751 return 2;
1755 if (code == XOR)
1756 break;
1758 if (set_sign_bit_copies > 8
1759 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
1761 if (generate)
1763 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1764 rtx shift = GEN_INT (set_sign_bit_copies);
1766 emit_constant_insn
1767 (cond,
1768 gen_rtx_SET (VOIDmode, sub,
1769 gen_rtx_NOT (mode,
1770 gen_rtx_ASHIFT (mode,
1771 source,
1772 shift))));
1773 emit_constant_insn
1774 (cond,
1775 gen_rtx_SET (VOIDmode, target,
1776 gen_rtx_NOT (mode,
1777 gen_rtx_LSHIFTRT (mode, sub,
1778 shift))));
1780 return 2;
1783 if (set_zero_bit_copies > 8
1784 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
1786 if (generate)
1788 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1789 rtx shift = GEN_INT (set_zero_bit_copies);
1791 emit_constant_insn
1792 (cond,
1793 gen_rtx_SET (VOIDmode, sub,
1794 gen_rtx_NOT (mode,
1795 gen_rtx_LSHIFTRT (mode,
1796 source,
1797 shift))));
1798 emit_constant_insn
1799 (cond,
1800 gen_rtx_SET (VOIDmode, target,
1801 gen_rtx_NOT (mode,
1802 gen_rtx_ASHIFT (mode, sub,
1803 shift))));
1805 return 2;
1808 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
1810 if (generate)
1812 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
1813 emit_constant_insn (cond,
1814 gen_rtx_SET (VOIDmode, sub,
1815 gen_rtx_NOT (mode, source)));
1816 source = sub;
1817 if (subtargets)
1818 sub = gen_reg_rtx (mode);
1819 emit_constant_insn (cond,
1820 gen_rtx_SET (VOIDmode, sub,
1821 gen_rtx_AND (mode, source,
1822 GEN_INT (temp1))));
1823 emit_constant_insn (cond,
1824 gen_rtx_SET (VOIDmode, target,
1825 gen_rtx_NOT (mode, sub)));
1827 return 3;
1829 break;
1831 case AND:
1832 /* See if two shifts will do 2 or more insn's worth of work. */
1833 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
1835 HOST_WIDE_INT shift_mask = ((0xffffffff
1836 << (32 - clear_sign_bit_copies))
1837 & 0xffffffff);
1839 if ((remainder | shift_mask) != 0xffffffff)
1841 if (generate)
1843 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1844 insns = arm_gen_constant (AND, mode, cond,
1845 remainder | shift_mask,
1846 new_src, source, subtargets, 1);
1847 source = new_src;
1849 else
1851 rtx targ = subtargets ? NULL_RTX : target;
1852 insns = arm_gen_constant (AND, mode, cond,
1853 remainder | shift_mask,
1854 targ, source, subtargets, 0);
1858 if (generate)
1860 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1861 rtx shift = GEN_INT (clear_sign_bit_copies);
1863 emit_insn (gen_ashlsi3 (new_src, source, shift));
1864 emit_insn (gen_lshrsi3 (target, new_src, shift));
1867 return insns + 2;
1870 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
1872 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
1874 if ((remainder | shift_mask) != 0xffffffff)
1876 if (generate)
1878 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1880 insns = arm_gen_constant (AND, mode, cond,
1881 remainder | shift_mask,
1882 new_src, source, subtargets, 1);
1883 source = new_src;
1885 else
1887 rtx targ = subtargets ? NULL_RTX : target;
1889 insns = arm_gen_constant (AND, mode, cond,
1890 remainder | shift_mask,
1891 targ, source, subtargets, 0);
1895 if (generate)
1897 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
1898 rtx shift = GEN_INT (clear_zero_bit_copies);
1900 emit_insn (gen_lshrsi3 (new_src, source, shift));
1901 emit_insn (gen_ashlsi3 (target, new_src, shift));
1904 return insns + 2;
1907 break;
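/* Worked example of the two-shift trick above (illustrative): for
   x & 0xffff, clear_sign_bit_copies is 16, so instead of loading
   the mask into a register we can emit

     mov  rT, rS, asl #16    @ shift the high part out the top
     mov  rD, rT, lsr #16    @ shift back down, zero-filling

   which is two insns and no constant load.  */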
1909 default:
1910 break;
1913 for (i = 0; i < 32; i++)
1914 if (remainder & (1 << i))
1915 num_bits_set++;
1917 if (code == AND || (can_invert && num_bits_set > 16))
1918 remainder = (~remainder) & 0xffffffff;
1919 else if (code == PLUS && num_bits_set > 16)
1920 remainder = (-remainder) & 0xffffffff;
1921 else
1923 can_invert = 0;
1924 can_negate = 0;
1927 /* Now try and find a way of doing the job in either two or three
1928 instructions.
1929 We start by looking for the largest block of zeros that is aligned on
1930 a 2-bit boundary; we then fill up the temps, wrapping around to the
1931 top of the word when we drop off the bottom.
1932 In the worst case this code should produce no more than four insns. */
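/* A worked example (illustrative): 0x12345678 cannot be loaded in a
   single mov, so it is split into four 8-bit chunks, each aligned on
   a 2-bit rotation boundary, giving the worst-case four insns:

     mov  rD, #0x12000000
     add  rD, rD, #0x00340000
     add  rD, rD, #0x00005600
     add  rD, rD, #0x00000078  */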
1934 int best_start = 0;
1935 int best_consecutive_zeros = 0;
1937 for (i = 0; i < 32; i += 2)
1939 int consecutive_zeros = 0;
1941 if (!(remainder & (3 << i)))
1943 while ((i < 32) && !(remainder & (3 << i)))
1945 consecutive_zeros += 2;
1946 i += 2;
1948 if (consecutive_zeros > best_consecutive_zeros)
1950 best_consecutive_zeros = consecutive_zeros;
1951 best_start = i - consecutive_zeros;
1953 i -= 2;
1957 /* So long as it won't require any more insns to do so, it's
1958 desirable to emit a small constant (in bits 0...9) in the last
1959 insn. This way there is more chance that it can be combined with
1960 a later addressing insn to form a pre-indexed load or store
1961 operation. Consider:
1963 *((volatile int *)0xe0000100) = 1;
1964 *((volatile int *)0xe0000110) = 2;
1966 We want this to wind up as:
1968 mov rA, #0xe0000000
1969 mov rB, #1
1970 str rB, [rA, #0x100]
1971 mov rB, #2
1972 str rB, [rA, #0x110]
1974 rather than having to synthesize both large constants from scratch.
1976 Therefore, we calculate how many insns would be required to emit
1977 the constant starting from `best_start', and also starting from
1978 zero (i.e. with bit 31 the first to be output).  If `best_start' doesn't
1979 yield a shorter sequence, we may as well use zero. */
1980 if (best_start != 0
1981 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
1982 && (count_insns_for_constant (remainder, 0) <=
1983 count_insns_for_constant (remainder, best_start)))
1984 best_start = 0;
1986 /* Now start emitting the insns. */
1987 i = best_start;
1990 int end;
1992 if (i <= 0)
1993 i += 32;
1994 if (remainder & (3 << (i - 2)))
1996 end = i - 8;
1997 if (end < 0)
1998 end += 32;
1999 temp1 = remainder & ((0x0ff << end)
2000 | ((i < end) ? (0xff >> (32 - end)) : 0));
2001 remainder &= ~temp1;
2003 if (generate)
2005 rtx new_src, temp1_rtx;
2007 if (code == SET || code == MINUS)
2009 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2010 if (can_invert && code != MINUS)
2011 temp1 = ~temp1;
2013 else
2015 if (remainder && subtargets)
2016 new_src = gen_reg_rtx (mode);
2017 else
2018 new_src = target;
2019 if (can_invert)
2020 temp1 = ~temp1;
2021 else if (can_negate)
2022 temp1 = -temp1;
2025 temp1 = trunc_int_for_mode (temp1, mode);
2026 temp1_rtx = GEN_INT (temp1);
2028 if (code == SET)
2030 else if (code == MINUS)
2031 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2032 else
2033 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2035 emit_constant_insn (cond,
2036 gen_rtx_SET (VOIDmode, new_src,
2037 temp1_rtx));
2038 source = new_src;
2041 if (code == SET)
2043 can_invert = 0;
2044 code = PLUS;
2046 else if (code == MINUS)
2047 code = PLUS;
2049 insns++;
2050 i -= 6;
2052 i -= 2;
2054 while (remainder);
2057 return insns;
2060 /* Canonicalize a comparison so that we are more likely to recognize it.
2061 This can be done for a few constant compares, where we can make the
2062 immediate value easier to load. */
2064 enum rtx_code
2065 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
2067 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2069 switch (code)
2071 case EQ:
2072 case NE:
2073 return code;
2075 case GT:
2076 case LE:
2077 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
2078 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2080 *op1 = GEN_INT (i + 1);
2081 return code == GT ? GE : LT;
2083 break;
2085 case GE:
2086 case LT:
2087 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
2088 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2090 *op1 = GEN_INT (i - 1);
2091 return code == GE ? GT : LE;
2093 break;
2095 case GTU:
2096 case LEU:
2097 if (i != ~((unsigned HOST_WIDE_INT) 0)
2098 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2100 *op1 = GEN_INT (i + 1);
2101 return code == GTU ? GEU : LTU;
2103 break;
2105 case GEU:
2106 case LTU:
2107 if (i != 0
2108 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2110 *op1 = GEN_INT (i - 1);
2111 return code == GEU ? GTU : LEU;
2113 break;
2115 default:
2116 abort ();
2119 return code;
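/* A concrete case (illustrative): for (x > 0xfff) the constant 0xfff
   is not a valid ARM immediate, but GT can become GE with the
   constant bumped by one, and 0x1000 is encodable:

     code = GT, *op1 = 0xfff   ==>   code = GE, *op1 = 0x1000  */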
2123 /* Define how to find the value returned by a function. */
2125 rtx arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
2127 enum machine_mode mode;
2128 int unsignedp ATTRIBUTE_UNUSED;
2129 rtx r ATTRIBUTE_UNUSED;
2132 mode = TYPE_MODE (type);
2133 /* Promote integer types. */
2134 if (INTEGRAL_TYPE_P (type))
2135 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2136 return LIBCALL_VALUE (mode);
2140 /* Decide whether a type should be returned in memory (true)
2141 or in a register (false). This is called by the macro
2142 RETURN_IN_MEMORY. */
2144 arm_return_in_memory (tree type)
2146 HOST_WIDE_INT size;
2148 if (!AGGREGATE_TYPE_P (type))
2149 /* All simple types are returned in registers. */
2150 return 0;
2152 size = int_size_in_bytes (type);
2154 if (arm_abi != ARM_ABI_APCS)
2156 /* ATPCS and later return aggregate types in memory only if they are
2157 larger than a word (or are variable size). */
2158 return (size < 0 || size > UNITS_PER_WORD);
2161 /* For the arm-wince targets we choose to be compatible with Microsoft's
2162 ARM and Thumb compilers, which always return aggregates in memory. */
2163 #ifndef ARM_WINCE
2164 /* All structures/unions bigger than one word are returned in memory.
2165 Also catch the case where int_size_in_bytes returns -1. In this case
2166 the aggregate is either huge or of variable size, and in either case
2167 we will want to return it via memory and not in a register. */
2168 if (size < 0 || size > UNITS_PER_WORD)
2169 return 1;
2171 if (TREE_CODE (type) == RECORD_TYPE)
2173 tree field;
2175 /* For a struct the APCS says that we only return in a register
2176 if the type is 'integer like' and every addressable element
2177 has an offset of zero. For practical purposes this means
2178 that the structure can have at most one non bit-field element
2179 and that this element must be the first one in the structure. */
2181 /* Find the first field, ignoring non FIELD_DECL things which will
2182 have been created by C++. */
2183 for (field = TYPE_FIELDS (type);
2184 field && TREE_CODE (field) != FIELD_DECL;
2185 field = TREE_CHAIN (field))
2186 continue;
2188 if (field == NULL)
2189 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2191 /* Check that the first field is valid for returning in a register. */
2193 /* ... Floats are not allowed.  */
2194 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2195 return 1;
2197 /* ... Aggregates that are not themselves valid for returning in
2198 a register are not allowed. */
2199 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2200 return 1;
2202 /* Now check the remaining fields, if any. Only bitfields are allowed,
2203 since they are not addressable. */
2204 for (field = TREE_CHAIN (field);
2205 field;
2206 field = TREE_CHAIN (field))
2208 if (TREE_CODE (field) != FIELD_DECL)
2209 continue;
2211 if (!DECL_BIT_FIELD_TYPE (field))
2212 return 1;
2215 return 0;
2218 if (TREE_CODE (type) == UNION_TYPE)
2220 tree field;
2222 /* Unions can be returned in registers if every element is
2223 integral, or can be returned in an integer register. */
2224 for (field = TYPE_FIELDS (type);
2225 field;
2226 field = TREE_CHAIN (field))
2228 if (TREE_CODE (field) != FIELD_DECL)
2229 continue;
2231 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2232 return 1;
2234 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2235 return 1;
2238 return 0;
2240 #endif /* not ARM_WINCE */
2242 /* Return all other types in memory. */
2243 return 1;
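/* Some illustrative cases for the APCS rules above (with ARM_WINCE
   not defined):

     struct a { int i; };           returned in a register (integer like)
     struct b { float f; };         returned in memory (float member)
     struct c { int i; int j; };    returned in memory (wider than a word)

   Under the ATPCS and later ABIs only the size test applies, so
   struct b is returned in a register as well.  */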
2246 /* Indicate whether or not words of a double are in big-endian order. */
2249 arm_float_words_big_endian (void)
2251 if (TARGET_MAVERICK)
2252 return 0;
2254 /* For FPA, float words are always big-endian.  For VFP, float words
2255 follow the memory system mode. */
2257 if (TARGET_FPA)
2259 return 1;
2262 if (TARGET_VFP)
2263 return (TARGET_BIG_END ? 1 : 0);
2265 return 1;
2268 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2269 for a call to a function whose data type is FNTYPE.
2270 For a library call, FNTYPE is NULL. */
2271 void
2272 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2273 rtx libname ATTRIBUTE_UNUSED,
2274 tree fndecl ATTRIBUTE_UNUSED)
2276 /* On the ARM, the offset starts at 0. */
2277 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2278 pcum->iwmmxt_nregs = 0;
2279 pcum->can_split = true;
2281 pcum->call_cookie = CALL_NORMAL;
2283 if (TARGET_LONG_CALLS)
2284 pcum->call_cookie = CALL_LONG;
2286 /* Check for long call/short call attributes. The attributes
2287 override any command line option. */
2288 if (fntype)
2290 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2291 pcum->call_cookie = CALL_SHORT;
2292 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2293 pcum->call_cookie = CALL_LONG;
2296 /* Varargs vectors are treated the same as long long.
2297 named_count avoids having to change the way arm handles 'named'.  */
2298 pcum->named_count = 0;
2299 pcum->nargs = 0;
2301 if (TARGET_REALLY_IWMMXT && fntype)
2303 tree fn_arg;
2305 for (fn_arg = TYPE_ARG_TYPES (fntype);
2306 fn_arg;
2307 fn_arg = TREE_CHAIN (fn_arg))
2308 pcum->named_count += 1;
2310 if (! pcum->named_count)
2311 pcum->named_count = INT_MAX;
2316 /* Return true if mode/type need doubleword alignment. */
2317 bool
2318 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2320 return (mode == DImode
2321 || mode == DFmode
2322 || VECTOR_MODE_SUPPORTED_P (mode)
2323 || (mode == BLKmode
2324 && TYPE_ALIGN (type) > PARM_BOUNDARY));
2328 /* Determine where to put an argument to a function.
2329 Value is zero to push the argument on the stack,
2330 or a hard register in which to store the argument.
2332 MODE is the argument's machine mode.
2333 TYPE is the data type of the argument (as a tree).
2334 This is null for libcalls where that information may
2335 not be available.
2336 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2337 the preceding args and about the function being called.
2338 NAMED is nonzero if this argument is a named parameter
2339 (otherwise it is an extra parameter matching an ellipsis). */
2342 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2343 tree type, int named)
2345 int nregs;
2347 /* Varargs vectors are treated the same as long long.
2348 named_count avoids having to change the way arm handles 'named'.  */
2349 if (TARGET_IWMMXT_ABI
2350 && VECTOR_MODE_SUPPORTED_P (mode)
2351 && pcum->named_count > pcum->nargs + 1)
2353 if (pcum->iwmmxt_nregs <= 9)
2354 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2355 else
2357 pcum->can_split = false;
2358 return NULL_RTX;
2362 /* Put doubleword aligned quantities in even register pairs. */
2363 if (pcum->nregs & 1
2364 && ARM_DOUBLEWORD_ALIGN
2365 && arm_needs_doubleword_align (mode, type))
2366 pcum->nregs++;
2368 if (mode == VOIDmode)
2369 /* Compute operand 2 of the call insn. */
2370 return GEN_INT (pcum->call_cookie);
2372 /* Only allow splitting an arg between regs and memory if all preceding
2373 args were allocated to regs. For args passed by reference we only count
2374 the reference pointer. */
2375 if (pcum->can_split)
2376 nregs = 1;
2377 else
2378 nregs = ARM_NUM_REGS2 (mode, type);
2380 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2381 return NULL_RTX;
2383 return gen_rtx_REG (mode, pcum->nregs);
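/* Example of the even-register rule above (illustrative, assuming a
   target where ARM_DOUBLEWORD_ALIGN holds):

     void f (int a, long long b);

   'a' is passed in r0; 'b' needs doubleword alignment, so r1 is
   skipped and 'b' occupies the even pair r2/r3.  */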
2386 /* Variable sized types are passed by reference. This is a GCC
2387 extension to the ARM ABI. */
2390 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2391 enum machine_mode mode ATTRIBUTE_UNUSED,
2392 tree type, int named ATTRIBUTE_UNUSED)
2394 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2397 /* Implement va_arg. */
2400 arm_va_arg (tree valist, tree type)
2402 int align;
2404 /* Variable sized types are passed by reference. */
2405 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2407 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2408 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2411 align = FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), type);
2412 if (align > PARM_BOUNDARY)
2414 tree mask;
2415 tree t;
2417 /* Maintain 64-bit alignment of the valist pointer by
2418 constructing: valist = ((valist + (8 - 1)) & -8). */
2419 mask = build_int_2 (- (align / BITS_PER_UNIT), -1);
2420 t = build_int_2 ((align / BITS_PER_UNIT) - 1, 0);
2421 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2422 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, mask);
2423 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2424 TREE_SIDE_EFFECTS (t) = 1;
2425 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2427 /* This is to stop the combine pass optimizing
2428 away the alignment adjustment. */
2429 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2432 return std_expand_builtin_va_arg (valist, type);
2435 /* Encode the current state of the #pragma [no_]long_calls. */
2436 typedef enum
2438 OFF, /* No #pragma [no_]long_calls is in effect.  */
2439 LONG, /* #pragma long_calls is in effect. */
2440 SHORT /* #pragma no_long_calls is in effect. */
2441 } arm_pragma_enum;
2443 static arm_pragma_enum arm_pragma_long_calls = OFF;
2445 void
2446 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2448 arm_pragma_long_calls = LONG;
2451 void
2452 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2454 arm_pragma_long_calls = SHORT;
2457 void
2458 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2460 arm_pragma_long_calls = OFF;
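/* Typical use of these pragmas in user code (illustrative):

     #pragma long_calls
     void far_away (void);        calls get the long (indirect) form
     #pragma long_calls_off
     void nearby (void);          back to the command-line default  */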
2463 /* Table of machine attributes. */
2464 const struct attribute_spec arm_attribute_table[] =
2466 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2467 /* Function calls made to this symbol must be done indirectly, because
2468 it may lie outside of the 26 bit addressing range of a normal function
2469 call. */
2470 { "long_call", 0, 0, false, true, true, NULL },
2471 /* Whereas these functions are always known to reside within the 26 bit
2472 addressing range. */
2473 { "short_call", 0, 0, false, true, true, NULL },
2474 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2475 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2476 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2477 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2478 #ifdef ARM_PE
2479 /* ARM/PE has three new attributes:
2480 interfacearm - ?
2481 dllexport - for exporting a function/variable that will live in a dll
2482 dllimport - for importing a function/variable from a dll
2484 Microsoft allows multiple declspecs in one __declspec, separating
2485 them with spaces. We do NOT support this. Instead, use __declspec
2486 multiple times.
2488 { "dllimport", 0, 0, true, false, false, NULL },
2489 { "dllexport", 0, 0, true, false, false, NULL },
2490 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2491 #endif
2492 { NULL, 0, 0, false, false, false, NULL }
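/* Example declarations using the attributes above (illustrative):

     void far_func (void) __attribute__ ((long_call));
     void near_func (void) __attribute__ ((short_call));
     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));
     void bare_func (void) __attribute__ ((naked));  */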
2495 /* Handle an attribute requiring a FUNCTION_DECL;
2496 arguments as in struct attribute_spec.handler. */
2497 static tree
2498 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2499 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2501 if (TREE_CODE (*node) != FUNCTION_DECL)
2503 warning ("`%s' attribute only applies to functions",
2504 IDENTIFIER_POINTER (name));
2505 *no_add_attrs = true;
2508 return NULL_TREE;
2511 /* Handle an "interrupt" or "isr" attribute;
2512 arguments as in struct attribute_spec.handler. */
2513 static tree
2514 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2515 bool *no_add_attrs)
2517 if (DECL_P (*node))
2519 if (TREE_CODE (*node) != FUNCTION_DECL)
2521 warning ("`%s' attribute only applies to functions",
2522 IDENTIFIER_POINTER (name));
2523 *no_add_attrs = true;
2525 /* FIXME: the argument if any is checked for type attributes;
2526 should it be checked for decl ones? */
2528 else
2530 if (TREE_CODE (*node) == FUNCTION_TYPE
2531 || TREE_CODE (*node) == METHOD_TYPE)
2533 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2535 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2536 *no_add_attrs = true;
2539 else if (TREE_CODE (*node) == POINTER_TYPE
2540 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2541 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2542 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2544 *node = build_type_copy (*node);
2545 TREE_TYPE (*node) = build_type_attribute_variant
2546 (TREE_TYPE (*node),
2547 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2548 *no_add_attrs = true;
2550 else
2552 /* Possibly pass this attribute on from the type to a decl. */
2553 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2554 | (int) ATTR_FLAG_FUNCTION_NEXT
2555 | (int) ATTR_FLAG_ARRAY_NEXT))
2557 *no_add_attrs = true;
2558 return tree_cons (name, args, NULL_TREE);
2560 else
2562 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2567 return NULL_TREE;
2570 /* Return 0 if the attributes for two types are incompatible, 1 if they
2571 are compatible, and 2 if they are nearly compatible (which causes a
2572 warning to be generated). */
2573 static int
2574 arm_comp_type_attributes (tree type1, tree type2)
2576 int l1, l2, s1, s2;
2578 /* Check for mismatch of non-default calling convention. */
2579 if (TREE_CODE (type1) != FUNCTION_TYPE)
2580 return 1;
2582 /* Check for mismatched call attributes. */
2583 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2584 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2585 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2586 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2588 /* Only bother to check if an attribute is defined. */
2589 if (l1 | l2 | s1 | s2)
2591 /* If one type has an attribute, the other must have the same attribute. */
2592 if ((l1 != l2) || (s1 != s2))
2593 return 0;
2595 /* Disallow mixed attributes. */
2596 if ((l1 & s2) || (l2 & s1))
2597 return 0;
2600 /* Check for mismatched ISR attribute. */
2601 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2602 if (! l1)
2603 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2604 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2605 if (! l2)
2606 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2607 if (l1 != l2)
2608 return 0;
2610 return 1;
2613 /* Encode long_call or short_call attribute by prefixing
2614 symbol name in DECL with a special character FLAG. */
2615 void
2616 arm_encode_call_attribute (tree decl, int flag)
2618 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2619 int len = strlen (str);
2620 char * newstr;
2622 /* Do not allow weak functions to be treated as short call. */
2623 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2624 return;
2626 newstr = alloca (len + 2);
2627 newstr[0] = flag;
2628 strcpy (newstr + 1, str);
2630 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2631 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
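/* Sketch of the effect: after arm_encode_call_attribute (decl,
   LONG_CALL_FLAG_CHAR) a symbol named "foo" is renamed to
   "<flag>foo", where <flag> is the single marker character; later
   passes then test ENCODED_LONG_CALL_ATTR_P on the symbol string
   instead of re-examining the attributes.  */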
2634 /* Assigns default attributes to newly defined type. This is used to
2635 set short_call/long_call attributes for function types of
2636 functions defined inside corresponding #pragma scopes. */
2637 static void
2638 arm_set_default_type_attributes (tree type)
2640 /* Add __attribute__ ((long_call)) to all functions, when
2641 inside #pragma long_calls or __attribute__ ((short_call)),
2642 when inside #pragma no_long_calls. */
2643 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2645 tree type_attr_list, attr_name;
2646 type_attr_list = TYPE_ATTRIBUTES (type);
2648 if (arm_pragma_long_calls == LONG)
2649 attr_name = get_identifier ("long_call");
2650 else if (arm_pragma_long_calls == SHORT)
2651 attr_name = get_identifier ("short_call");
2652 else
2653 return;
2655 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2656 TYPE_ATTRIBUTES (type) = type_attr_list;
2660 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2661 defined within the current compilation unit. If this cannot be
2662 determined, then 0 is returned. */
2663 static int
2664 current_file_function_operand (rtx sym_ref)
2666 /* This is a bit of a fib. A function will have a short call flag
2667 applied to its name if it has the short call attribute, or it has
2668 already been defined within the current compilation unit. */
2669 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2670 return 1;
2672 /* The current function is always defined within the current compilation
2673 unit.  If it is a weak definition, however, then this may not be the real
2674 definition of the function, and so we have to say no. */
2675 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2676 && !DECL_WEAK (current_function_decl))
2677 return 1;
2679 /* We cannot make the determination - default to returning 0. */
2680 return 0;
2683 /* Return nonzero if a 32 bit "long_call" should be generated for
2684 this call. We generate a long_call if the function:
2686 a. has an __attribute__ ((long_call))
2687 or b. is within the scope of a #pragma long_calls
2688 or c. the -mlong-calls command line switch has been specified
2690 However we do not generate a long call if the function:
2692 d. has an __attribute__ ((short_call))
2693 or e. is inside the scope of a #pragma no_long_calls
2694 or f. has an __attribute__ ((section))
2695 or g. is defined within the current compilation unit.
2697 This function will be called by C fragments contained in the machine
2698 description file. CALL_REF and CALL_COOKIE correspond to the matched
2699 rtl operands. CALL_SYMBOL is used to distinguish between
2700 two different callers of the function. It is set to 1 in the
2701 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2702 and "call_value" patterns. This is because of the difference in the
2703 SYM_REFs passed by these patterns. */
2705 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2707 if (!call_symbol)
2709 if (GET_CODE (sym_ref) != MEM)
2710 return 0;
2712 sym_ref = XEXP (sym_ref, 0);
2715 if (GET_CODE (sym_ref) != SYMBOL_REF)
2716 return 0;
2718 if (call_cookie & CALL_SHORT)
2719 return 0;
2721 if (TARGET_LONG_CALLS && flag_function_sections)
2722 return 1;
2724 if (current_file_function_operand (sym_ref))
2725 return 0;
2727 return (call_cookie & CALL_LONG)
2728 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2729 || TARGET_LONG_CALLS;
2732 /* Return nonzero if it is ok to make a tail-call to DECL. */
2733 static bool
2734 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2736 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2738 if (cfun->machine->sibcall_blocked)
2739 return false;
2741 /* Never tailcall something for which we have no decl, or if we
2742 are in Thumb mode. */
2743 if (decl == NULL || TARGET_THUMB)
2744 return false;
2746 /* Get the calling method. */
2747 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2748 call_type = CALL_SHORT;
2749 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2750 call_type = CALL_LONG;
2752 /* Cannot tail-call to long calls, since these are out of range of
2753 a branch instruction. However, if not compiling PIC, we know
2754 we can reach the symbol if it is in this compilation unit. */
2755 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2756 return false;
2758 /* If we are interworking and the function is not declared static
2759 then we can't tail-call it unless we know that it exists in this
2760 compilation unit (since it might be a Thumb routine). */
2761 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2762 return false;
2764 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2765 if (IS_INTERRUPT (arm_current_func_type ()))
2766 return false;
2768 /* Everything else is ok. */
2769 return true;
2773 /* Addressing mode support functions. */
2775 /* Return nonzero if X is a legitimate immediate operand when compiling
2776 for PIC. */
2778 legitimate_pic_operand_p (rtx x)
2780 if (CONSTANT_P (x)
2781 && flag_pic
2782 && (GET_CODE (x) == SYMBOL_REF
2783 || (GET_CODE (x) == CONST
2784 && GET_CODE (XEXP (x, 0)) == PLUS
2785 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2786 return 0;
2788 return 1;
2792 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2794 if (GET_CODE (orig) == SYMBOL_REF
2795 || GET_CODE (orig) == LABEL_REF)
2797 #ifndef AOF_ASSEMBLER
2798 rtx pic_ref, address;
2799 #endif
2800 rtx insn;
2801 int subregs = 0;
2803 if (reg == 0)
2805 if (no_new_pseudos)
2806 abort ();
2807 else
2808 reg = gen_reg_rtx (Pmode);
2810 subregs = 1;
2813 #ifdef AOF_ASSEMBLER
2814 /* The AOF assembler can generate relocations for these directly, and
2815 understands that the PIC register has to be added into the offset. */
2816 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2817 #else
2818 if (subregs)
2819 address = gen_reg_rtx (Pmode);
2820 else
2821 address = reg;
2823 if (TARGET_ARM)
2824 emit_insn (gen_pic_load_addr_arm (address, orig));
2825 else
2826 emit_insn (gen_pic_load_addr_thumb (address, orig));
2828 if ((GET_CODE (orig) == LABEL_REF
2829 || (GET_CODE (orig) == SYMBOL_REF
2830 && SYMBOL_REF_LOCAL_P (orig)))
2831 && NEED_GOT_RELOC)
2832 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2833 else
2835 pic_ref = gen_rtx_MEM (Pmode,
2836 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2837 address));
2838 RTX_UNCHANGING_P (pic_ref) = 1;
2841 insn = emit_move_insn (reg, pic_ref);
2842 #endif
2843 current_function_uses_pic_offset_table = 1;
2844 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2845 by loop. */
2846 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2847 REG_NOTES (insn));
2848 return reg;
2850 else if (GET_CODE (orig) == CONST)
2852 rtx base, offset;
2854 if (GET_CODE (XEXP (orig, 0)) == PLUS
2855 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2856 return orig;
2858 if (reg == 0)
2860 if (no_new_pseudos)
2861 abort ();
2862 else
2863 reg = gen_reg_rtx (Pmode);
2866 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2868 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2869 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2870 base == reg ? 0 : reg);
2872 else
2873 abort ();
2875 if (GET_CODE (offset) == CONST_INT)
2877 /* The base register doesn't really matter; we only want to
2878 test the index for the appropriate mode. */
2879 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2881 if (!no_new_pseudos)
2882 offset = force_reg (Pmode, offset);
2883 else
2884 abort ();
2887 if (GET_CODE (offset) == CONST_INT)
2888 return plus_constant (base, INTVAL (offset));
2891 if (GET_MODE_SIZE (mode) > 4
2892 && (GET_MODE_CLASS (mode) == MODE_INT
2893 || TARGET_SOFT_FLOAT))
2895 emit_insn (gen_addsi3 (reg, base, offset));
2896 return reg;
2899 return gen_rtx_PLUS (Pmode, base, offset);
2902 return orig;
2905 /* Generate code to load the PIC register. PROLOGUE is true if
2906 called from arm_expand_prologue (in which case we want the
2907 generated insns at the start of the function); false if called
2908 by an exception receiver that needs the PIC register reloaded
2909 (in which case the insns are just dumped at the current location). */
2910 void
2911 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2913 #ifndef AOF_ASSEMBLER
2914 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2915 rtx global_offset_table;
2917 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2918 return;
2920 if (!flag_pic)
2921 abort ();
2923 start_sequence ();
2924 l1 = gen_label_rtx ();
2926 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2927 /* On the ARM the PC register contains 'dot + 8' at the time of the
2928 addition, on the Thumb it is 'dot + 4'. */
2929 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2930 if (GOT_PCREL)
2931 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2932 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2933 else
2934 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2936 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2938 if (TARGET_ARM)
2940 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2941 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2943 else
2945 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2946 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2949 seq = get_insns ();
2950 end_sequence ();
2951 if (prologue)
2952 emit_insn_after (seq, get_insns ());
2953 else
2954 emit_insn (seq);
2956 /* Need to emit this whether or not we obey regdecls,
2957 since setjmp/longjmp can cause life info to screw up. */
2958 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2959 #endif /* AOF_ASSEMBLER */
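/* The ARM-mode sequence emitted above looks roughly like this
   (sketch; sl denotes the PIC register):

       ldr   sl, .Lgot          @ _GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)
     .LPIC0:
       add   sl, pc, sl         @ sl now addresses the GOT  */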
2962 /* Return nonzero if X is valid as an ARM state addressing register. */
2963 static int
2964 arm_address_register_rtx_p (rtx x, int strict_p)
2966 int regno;
2968 if (GET_CODE (x) != REG)
2969 return 0;
2971 regno = REGNO (x);
2973 if (strict_p)
2974 return ARM_REGNO_OK_FOR_BASE_P (regno);
2976 return (regno <= LAST_ARM_REGNUM
2977 || regno >= FIRST_PSEUDO_REGISTER
2978 || regno == FRAME_POINTER_REGNUM
2979 || regno == ARG_POINTER_REGNUM);
2982 /* Return nonzero if X is a valid ARM state address operand. */
2984 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
2985 int strict_p)
2987 if (arm_address_register_rtx_p (x, strict_p))
2988 return 1;
2990 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2991 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2993 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2994 && GET_MODE_SIZE (mode) <= 4
2995 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2996 && GET_CODE (XEXP (x, 1)) == PLUS
2997 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
2998 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), outer,
2999 strict_p);
3001 /* After reload constants split into minipools will have addresses
3002 from a LABEL_REF. */
3003 else if (reload_completed
3004 && (GET_CODE (x) == LABEL_REF
3005 || (GET_CODE (x) == CONST
3006 && GET_CODE (XEXP (x, 0)) == PLUS
3007 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3008 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3009 return 1;
3011 else if (mode == TImode)
3012 return 0;
3014 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
3016 if (GET_CODE (x) == PLUS
3017 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3018 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3020 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
3022 if (val == 4 || val == -4 || val == -8)
3023 return 1;
3027 else if (TARGET_HARD_FLOAT && TARGET_VFP && mode == DFmode)
3029 if (GET_CODE (x) == PLUS
3030 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3031 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3033 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
3035 /* ??? Valid ARM offsets are a subset of VFP offsets.
3036 For now only allow this subset. Proper fix is to add an
3037 additional memory constraint for arm address modes.
3038 Alternatively allow full vfp addressing and let
3039 output_move_double fix it up with a sub-optimal sequence. */
3040 if (val == 4 || val == -4 || val == -8)
3041 return 1;
3045 else if (GET_CODE (x) == PLUS)
3047 rtx xop0 = XEXP (x, 0);
3048 rtx xop1 = XEXP (x, 1);
3050 return ((arm_address_register_rtx_p (xop0, strict_p)
3051 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3052 || (arm_address_register_rtx_p (xop1, strict_p)
3053 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3056 #if 0
3057 /* Reload currently can't handle MINUS, so disable this for now */
3058 else if (GET_CODE (x) == MINUS)
3060 rtx xop0 = XEXP (x, 0);
3061 rtx xop1 = XEXP (x, 1);
3063 return (arm_address_register_rtx_p (xop0, strict_p)
3064 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3066 #endif
3068 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3069 && GET_CODE (x) == SYMBOL_REF
3070 && CONSTANT_POOL_ADDRESS_P (x)
3071 && ! (flag_pic
3072 && symbol_mentioned_p (get_pool_constant (x))))
3073 return 1;
3075 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
3076 && (GET_MODE_SIZE (mode) <= 4)
3077 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
3078 return 1;
3080 return 0;
3083 /* Return nonzero if INDEX is valid for an address index operand in
3084 ARM state. */
3085 static int
3086 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3087 int strict_p)
3089 HOST_WIDE_INT range;
3090 enum rtx_code code = GET_CODE (index);
3092 if (TARGET_HARD_FLOAT && TARGET_FPA && GET_MODE_CLASS (mode) == MODE_FLOAT)
3093 return (code == CONST_INT && INTVAL (index) < 1024
3094 && INTVAL (index) > -1024
3095 && (INTVAL (index) & 3) == 0);
3097 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
3098 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
3099 return (code == CONST_INT
3100 && INTVAL (index) < 255
3101 && INTVAL (index) > -255);
3103 if (arm_address_register_rtx_p (index, strict_p)
3104 && GET_MODE_SIZE (mode) <= 4)
3105 return 1;
3107 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3108 return (code == CONST_INT
3109 && INTVAL (index) < 256
3110 && INTVAL (index) > -256);
3112 if (GET_MODE_SIZE (mode) <= 4
3113 && ! (arm_arch4
3114 && (mode == HImode
3115 || (mode == QImode && outer == SIGN_EXTEND))))
3117 if (code == MULT)
3119 rtx xiop0 = XEXP (index, 0);
3120 rtx xiop1 = XEXP (index, 1);
3122 return ((arm_address_register_rtx_p (xiop0, strict_p)
3123 && power_of_two_operand (xiop1, SImode))
3124 || (arm_address_register_rtx_p (xiop1, strict_p)
3125 && power_of_two_operand (xiop0, SImode)));
3127 else if (code == LSHIFTRT || code == ASHIFTRT
3128 || code == ASHIFT || code == ROTATERT)
3130 rtx op = XEXP (index, 1);
3132 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3133 && GET_CODE (op) == CONST_INT
3134 && INTVAL (op) > 0
3135 && INTVAL (op) <= 31);
3139 /* For ARM v4 we may be doing a sign-extend operation during the
3140 load. */
3141 if (arm_arch4)
3143 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3144 range = 256;
3145 else
3146 range = 4096;
3148 else
3149 range = (mode == HImode) ? 4095 : 4096;
3151 return (code == CONST_INT
3152 && INTVAL (index) < range
3153 && INTVAL (index) > -range);
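/* Illustrative index forms accepted for a SImode reference (sketch):

     [r0, #1020]          register plus 12-bit constant
     [r0, r1]             register plus register
     [r0, r1, lsl #2]     register plus power-of-two scaled register
     [r0, r1, lsr #3]     register plus shifted register  */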
3156 /* Return nonzero if X is valid as a Thumb state base register. */
3157 static int
3158 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3160 int regno;
3162 if (GET_CODE (x) != REG)
3163 return 0;
3165 regno = REGNO (x);
3167 if (strict_p)
3168 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3170 return (regno <= LAST_LO_REGNUM
3171 || regno > LAST_VIRTUAL_REGISTER
3172 || regno == FRAME_POINTER_REGNUM
3173 || (GET_MODE_SIZE (mode) >= 4
3174 && (regno == STACK_POINTER_REGNUM
3175 || regno >= FIRST_PSEUDO_REGISTER
3176 || x == hard_frame_pointer_rtx
3177 || x == arg_pointer_rtx)));
3180 /* Return nonzero if x is a legitimate index register. This is the case
3181 for any base register that can access a QImode object. */
3182 inline static int
3183 thumb_index_register_rtx_p (rtx x, int strict_p)
3185 return thumb_base_register_rtx_p (x, QImode, strict_p);
3188 /* Return nonzero if x is a legitimate Thumb-state address.
3190 The AP may be eliminated to either the SP or the FP, so we use the
3191 least common denominator, e.g. SImode, and offsets from 0 to 64.
3193 ??? Verify whether the above is the right approach.
3195 ??? Also, the FP may be eliminated to the SP, so perhaps that
3196 needs special handling also.
3198 ??? Look at how the mips16 port solves this problem. It probably uses
3199 better ways to solve some of these problems.
3201 Although it is not incorrect, we don't accept QImode and HImode
3202 addresses based on the frame pointer or arg pointer until the
3203 reload pass starts. This is so that eliminating such addresses
3204 into stack based ones won't produce impossible code. */
3206 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3208 /* ??? Not clear if this is right. Experiment. */
3209 if (GET_MODE_SIZE (mode) < 4
3210 && !(reload_in_progress || reload_completed)
3211 && (reg_mentioned_p (frame_pointer_rtx, x)
3212 || reg_mentioned_p (arg_pointer_rtx, x)
3213 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3214 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3215 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3216 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3217 return 0;
3219 /* Accept any base register. SP only in SImode or larger. */
3220 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3221 return 1;
3223 /* This is PC relative data before arm_reorg runs. */
3224 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3225 && GET_CODE (x) == SYMBOL_REF
3226 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3227 return 1;
3229 /* This is PC relative data after arm_reorg runs. */
3230 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3231 && (GET_CODE (x) == LABEL_REF
3232 || (GET_CODE (x) == CONST
3233 && GET_CODE (XEXP (x, 0)) == PLUS
3234 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3235 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3236 return 1;
3238 /* Post-inc indexing only supported for SImode and larger. */
3239 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3240 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3241 return 1;
3243 else if (GET_CODE (x) == PLUS)
3245 /* REG+REG address can be any two index registers. */
3246 /* We disallow FRAME+REG addressing since we know that FRAME
3247 will be replaced with STACK, and SP relative addressing only
3248 permits SP+OFFSET. */
3249 if (GET_MODE_SIZE (mode) <= 4
3250 && XEXP (x, 0) != frame_pointer_rtx
3251 && XEXP (x, 1) != frame_pointer_rtx
3252 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3253 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3254 return 1;
3256 /* REG+const has 5-7 bit offset for non-SP registers. */
3257 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3258 || XEXP (x, 0) == arg_pointer_rtx)
3259 && GET_CODE (XEXP (x, 1)) == CONST_INT
3260 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3261 return 1;
3263 /* REG+const has 10 bit offset for SP, but only SImode and
3264 larger is supported. */
3265 /* ??? Should probably check for DI/DFmode overflow here
3266 just like GO_IF_LEGITIMATE_OFFSET does. */
3267 else if (GET_CODE (XEXP (x, 0)) == REG
3268 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3269 && GET_MODE_SIZE (mode) >= 4
3270 && GET_CODE (XEXP (x, 1)) == CONST_INT
3271 && INTVAL (XEXP (x, 1)) >= 0
3272 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3273 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3274 return 1;
3276 else if (GET_CODE (XEXP (x, 0)) == REG
3277 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3278 && GET_MODE_SIZE (mode) >= 4
3279 && GET_CODE (XEXP (x, 1)) == CONST_INT
3280 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3281 return 1;
3284 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3285 && GET_MODE_SIZE (mode) == 4
3286 && GET_CODE (x) == SYMBOL_REF
3287 && CONSTANT_POOL_ADDRESS_P (x)
3288 && !(flag_pic
3289 && symbol_mentioned_p (get_pool_constant (x))))
3290 return 1;
3292 return 0;
3295 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3296 instruction of mode MODE. */
3298 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3300 switch (GET_MODE_SIZE (mode))
3302 case 1:
3303 return val >= 0 && val < 32;
3305 case 2:
3306 return val >= 0 && val < 64 && (val & 1) == 0;
3308 default:
3309 return (val >= 0
3310 && (val + GET_MODE_SIZE (mode)) <= 128
3311 && (val & 3) == 0);
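/* In other words (illustrative): QImode accepts offsets 0..31,
   HImode accepts even offsets 0..62, and word (or larger) accesses
   accept multiples of 4 with the last byte within 128, e.g. 0..124
   for SImode.  */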
3315 /* Try machine-dependent ways of modifying an illegitimate address
3316 to be legitimate. If we find one, return the new, valid address. */
3318 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3320 if (GET_CODE (x) == PLUS)
3322 rtx xop0 = XEXP (x, 0);
3323 rtx xop1 = XEXP (x, 1);
3325 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3326 xop0 = force_reg (SImode, xop0);
3328 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3329 xop1 = force_reg (SImode, xop1);
3331 if (ARM_BASE_REGISTER_RTX_P (xop0)
3332 && GET_CODE (xop1) == CONST_INT)
3334 HOST_WIDE_INT n, low_n;
3335 rtx base_reg, val;
3336 n = INTVAL (xop1);
3338 /* VFP addressing modes actually allow greater offsets, but for
3339 now we just stick with the lowest common denominator. */
3340 if (mode == DImode
3341 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3343 low_n = n & 0x0f;
3344 n &= ~0x0f;
3345 if (low_n > 4)
3347 n += 16;
3348 low_n -= 16;
3351 else
3353 low_n = ((mode) == TImode ? 0
3354 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3355 n -= low_n;
3358 base_reg = gen_reg_rtx (SImode);
3359 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3360 GEN_INT (n)), NULL_RTX);
3361 emit_move_insn (base_reg, val);
3362 x = (low_n == 0 ? base_reg
3363 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3365 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3366 x = gen_rtx_PLUS (SImode, xop0, xop1);
3369 /* XXX We don't allow MINUS any more -- see comment in
3370 arm_legitimate_address_p (). */
3371 else if (GET_CODE (x) == MINUS)
3373 rtx xop0 = XEXP (x, 0);
3374 rtx xop1 = XEXP (x, 1);
3376 if (CONSTANT_P (xop0))
3377 xop0 = force_reg (SImode, xop0);
3379 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3380 xop1 = force_reg (SImode, xop1);
3382 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3383 x = gen_rtx_MINUS (SImode, xop0, xop1);
3386 if (flag_pic)
3388 /* We need to find and carefully transform any SYMBOL and LABEL
3389 references; so go back to the original address expression. */
3390 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3392 if (new_x != orig_x)
3393 x = new_x;
3396 return x;
3400 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3401 to be legitimate. If we find one, return the new, valid address. */
3403 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3405 if (GET_CODE (x) == PLUS
3406 && GET_CODE (XEXP (x, 1)) == CONST_INT
3407 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3408 || INTVAL (XEXP (x, 1)) < 0))
3410 rtx xop0 = XEXP (x, 0);
3411 rtx xop1 = XEXP (x, 1);
3412 HOST_WIDE_INT offset = INTVAL (xop1);
3414 /* Try and fold the offset into a biasing of the base register and
3415 then offsetting that. Don't do this when optimizing for space
3416 since it can cause too many CSEs. */
3417 if (optimize_size && offset >= 0
3418 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3420 HOST_WIDE_INT delta;
3422 if (offset >= 256)
3423 delta = offset - (256 - GET_MODE_SIZE (mode));
3424 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3425 delta = 31 * GET_MODE_SIZE (mode);
3426 else
3427 delta = offset & (~31 * GET_MODE_SIZE (mode));
3429 xop0 = force_operand (plus_constant (xop0, offset - delta),
3430 NULL_RTX);
3431 x = plus_constant (xop0, delta);
3433 else if (offset < 0 && offset > -256)
3434 /* Small negative offsets are best done with a subtract before the
3435 dereference; forcing these into a register normally takes two
3436 instructions. */
3437 x = force_operand (x, NULL_RTX);
3438 else
3440 /* For the remaining cases, force the constant into a register. */
3441 xop1 = force_reg (SImode, xop1);
3442 x = gen_rtx_PLUS (SImode, xop0, xop1);
3445 else if (GET_CODE (x) == PLUS
3446 && s_register_operand (XEXP (x, 1), SImode)
3447 && !s_register_operand (XEXP (x, 0), SImode))
3449 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3451 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3454 if (flag_pic)
3456 /* We need to find and carefully transform any SYMBOL and LABEL
3457 references; so go back to the original address expression. */
3458 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3460 if (new_x != orig_x)
3461 x = new_x;
3464 return x;
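/* Worked example of the re-biasing above (illustrative): an SImode
   access at base+300 is out of range (word offsets stop at 124), so
   on this path the address is split as

     rT := base + 252         (offset minus delta, materialized first)
     ldr  rD, [rT, #48]       (delta, a legal word offset)

   keeping both constants within Thumb immediate ranges.  */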
3469 #define REG_OR_SUBREG_REG(X) \
3470 (GET_CODE (X) == REG \
3471 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3473 #define REG_OR_SUBREG_RTX(X) \
3474 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3476 #ifndef COSTS_N_INSNS
3477 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3478 #endif
3479 static inline int
3480 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3482 enum machine_mode mode = GET_MODE (x);
3484 switch (code)
3486 case ASHIFT:
3487 case ASHIFTRT:
3488 case LSHIFTRT:
3489 case ROTATERT:
3490 case PLUS:
3491 case MINUS:
3492 case COMPARE:
3493 case NEG:
3494 case NOT:
3495 return COSTS_N_INSNS (1);
3497 case MULT:
3498 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3500 int cycles = 0;
3501 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3503 while (i)
3505 i >>= 2;
3506 cycles++;
3508 return COSTS_N_INSNS (2) + cycles;
3510 return COSTS_N_INSNS (1) + 16;
3512 case SET:
3513 return (COSTS_N_INSNS (1)
3514 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3515 + (GET_CODE (SET_DEST (x)) == MEM)));
3517 case CONST_INT:
3518 if (outer == SET)
3520 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3521 return 0;
3522 if (thumb_shiftable_const (INTVAL (x)))
3523 return COSTS_N_INSNS (2);
3524 return COSTS_N_INSNS (3);
3526 else if ((outer == PLUS || outer == COMPARE)
3527 && INTVAL (x) < 256 && INTVAL (x) > -256)
3528 return 0;
3529 else if (outer == AND
3530 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3531 return COSTS_N_INSNS (1);
3532 else if (outer == ASHIFT || outer == ASHIFTRT
3533 || outer == LSHIFTRT)
3534 return 0;
3535 return COSTS_N_INSNS (2);
3537 case CONST:
3538 case CONST_DOUBLE:
3539 case LABEL_REF:
3540 case SYMBOL_REF:
3541 return COSTS_N_INSNS (3);
3543 case UDIV:
3544 case UMOD:
3545 case DIV:
3546 case MOD:
3547 return 100;
3549 case TRUNCATE:
3550 return 99;
3552 case AND:
3553 case XOR:
3554 case IOR:
3555 /* XXX guess. */
3556 return 8;
3558 case ADDRESSOF:
3559 case MEM:
3560 /* XXX another guess. */
3561 /* Memory costs quite a lot for the first word, but subsequent words
3562 load at the equivalent of a single insn each. */
3563 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3564 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3565 ? 4 : 0));
3567 case IF_THEN_ELSE:
3568 /* XXX a guess. */
3569 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3570 return 14;
3571 return 2;
3573 case ZERO_EXTEND:
3574 /* XXX still guessing. */
3575 switch (GET_MODE (XEXP (x, 0)))
3577 case QImode:
3578 return (1 + (mode == DImode ? 4 : 0)
3579 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3581 case HImode:
3582 return (4 + (mode == DImode ? 4 : 0)
3583 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3585 case SImode:
3586 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3588 default:
3589 return 99;
3592 default:
3593 return 99;
3598 /* Worker routine for arm_rtx_costs. */
3599 static inline int
3600 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3602 enum machine_mode mode = GET_MODE (x);
3603 enum rtx_code subcode;
3604 int extra_cost;
3606 switch (code)
3608 case MEM:
3609 /* Memory costs quite a lot for the first word, but subsequent words
3610 load at the equivalent of a single insn each. */
3611 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3612 + (GET_CODE (x) == SYMBOL_REF
3613 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3615 case DIV:
3616 case MOD:
3617 case UDIV:
3618 case UMOD:
3619 return optimize_size ? COSTS_N_INSNS (2) : 100;
3621 case ROTATE:
3622 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3623 return 4;
3624 /* Fall through */
3625 case ROTATERT:
3626 if (mode != SImode)
3627 return 8;
3628 /* Fall through */
3629 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3630 if (mode == DImode)
3631 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3632 + ((GET_CODE (XEXP (x, 0)) == REG
3633 || (GET_CODE (XEXP (x, 0)) == SUBREG
3634 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3635 ? 0 : 8));
3636 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3637 || (GET_CODE (XEXP (x, 0)) == SUBREG
3638 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3639 ? 0 : 4)
3640 + ((GET_CODE (XEXP (x, 1)) == REG
3641 || (GET_CODE (XEXP (x, 1)) == SUBREG
3642 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3643 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3644 ? 0 : 4));
3646 case MINUS:
3647 if (mode == DImode)
3648 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3649 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3650 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3651 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3652 ? 0 : 8));
3654 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3655 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3656 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3657 && arm_const_double_rtx (XEXP (x, 1))))
3658 ? 0 : 8)
3659 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3660 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3661 && arm_const_double_rtx (XEXP (x, 0))))
3662 ? 0 : 8));
3664 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3665 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3666 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3667 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3668 || subcode == ASHIFTRT || subcode == LSHIFTRT
3669 || subcode == ROTATE || subcode == ROTATERT
3670 || (subcode == MULT
3671 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3672 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3673 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3674 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3675 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3676 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3677 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3678 return 1;
3679 /* Fall through */
3681 case PLUS:
3682 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3683 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3684 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3685 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3686 && arm_const_double_rtx (XEXP (x, 1))))
3687 ? 0 : 8));
3689 /* Fall through */
3690 case AND: case XOR: case IOR:
3691 extra_cost = 0;
3693 /* Normally the frame registers will be spilt into reg+const during
3694 reload, so it is a bad idea to combine them with other instructions,
3695 since then they might not be moved outside of loops. As a compromise
3696 we allow integration with ops that have a constant as their second
3697 operand. */
3698 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3699 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3700 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3701 || (REG_OR_SUBREG_REG (XEXP (x, 0))
3702 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
3703 extra_cost = 4;
3705 if (mode == DImode)
3706 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3707 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3708 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3709 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3710 ? 0 : 8));
3712 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3713 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3714 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3715 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3716 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3717 ? 0 : 4));
3719 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3720 return (1 + extra_cost
3721 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3722 || subcode == LSHIFTRT || subcode == ASHIFTRT
3723 || subcode == ROTATE || subcode == ROTATERT
3724 || (subcode == MULT
3725 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3726 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3727 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3728 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3729 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3730 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3731 ? 0 : 4));
3733 return 8;
3735 case MULT:
3736 /* This should have been handled by the CPU specific routines. */
3737 abort ();
3739 case TRUNCATE:
3740 if (arm_arch3m && mode == SImode
3741 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3742 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3743 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3744 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3745 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3746 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3747 return 8;
3748 return 99;
3750 case NEG:
3751 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3752 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3753 /* Fall through */
3754 case NOT:
3755 if (mode == DImode)
3756 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3758 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3760 case IF_THEN_ELSE:
3761 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3762 return 14;
3763 return 2;
3765 case COMPARE:
3766 return 1;
3768 case ABS:
3769 return 4 + (mode == DImode ? 4 : 0);
3771 case SIGN_EXTEND:
3772 if (GET_MODE (XEXP (x, 0)) == QImode)
3773 return (4 + (mode == DImode ? 4 : 0)
3774 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3775 /* Fall through */
3776 case ZERO_EXTEND:
3777 switch (GET_MODE (XEXP (x, 0)))
3779 case QImode:
3780 return (1 + (mode == DImode ? 4 : 0)
3781 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3783 case HImode:
3784 return (4 + (mode == DImode ? 4 : 0)
3785 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3787 case SImode:
3788 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3790 case V8QImode:
3791 case V4HImode:
3792 case V2SImode:
3793 case V4QImode:
3794 case V2HImode:
3795 return 1;
3797 default:
3798 break;
3800 abort ();
3802 case CONST_INT:
3803 if (const_ok_for_arm (INTVAL (x)))
3804 return outer == SET ? 2 : -1;
3805 else if (outer == AND
3806 && const_ok_for_arm (~INTVAL (x)))
3807 return -1;
3808 else if ((outer == COMPARE
3809 || outer == PLUS || outer == MINUS)
3810 && const_ok_for_arm (-INTVAL (x)))
3811 return -1;
3812 else
3813 return 5;
3815 case CONST:
3816 case LABEL_REF:
3817 case SYMBOL_REF:
3818 return 6;
3820 case CONST_DOUBLE:
3821 if (arm_const_double_rtx (x))
3822 return outer == SET ? 2 : -1;
3823 else if ((outer == COMPARE || outer == PLUS)
3824 && neg_const_double_rtx_ok_for_fpa (x))
3825 return -1;
3826 return 7;
3828 default:
3829 return 99;
3833 /* RTX costs for cores with a slow MUL implementation. */
3835 static bool
3836 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3838 enum machine_mode mode = GET_MODE (x);
3840 if (TARGET_THUMB)
3842 *total = thumb_rtx_costs (x, code, outer_code);
3843 return true;
3846 switch (code)
3848 case MULT:
3849 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3850 || mode == DImode)
3852 *total = 30;
3853 return true;
3856 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3858 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3859 & (unsigned HOST_WIDE_INT) 0xffffffff);
3860 int cost, const_ok = const_ok_for_arm (i);
3861 int j, booth_unit_size;
3863 /* Tune as appropriate. */
3864 cost = const_ok ? 4 : 8;
3865 booth_unit_size = 2;
3866 for (j = 0; i && j < 32; j += booth_unit_size)
3868 i >>= booth_unit_size;
3869 cost += 2;
3872 *total = cost;
3873 return true;
3876 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3877 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3878 return true;
3880 default:
3881 *total = arm_rtx_costs_1 (x, code, outer_code);
3882 return true;
3887 /* RTX cost for cores with a fast multiply unit (M variants). */
3889 static bool
3890 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3892 enum machine_mode mode = GET_MODE (x);
3894 if (TARGET_THUMB)
3896 *total = thumb_rtx_costs (x, code, outer_code);
3897 return true;
3900 switch (code)
3902 case MULT:
3903 /* There is no point basing this on the tuning, since it is always the
3904 fast variant if it exists at all. */
3905 if (mode == DImode
3906 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3907 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3908 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3910 *total = 8;
3911 return true;
3915 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3916 || mode == DImode)
3918 *total = 30;
3919 return true;
3922 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3924 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3925 & (unsigned HOST_WIDE_INT) 0xffffffff);
3926 int cost, const_ok = const_ok_for_arm (i);
3927 int j, booth_unit_size;
3929 /* Tune as appropriate. */
3930 cost = const_ok ? 4 : 8;
3931 booth_unit_size = 8;
3932 for (j = 0; i && j < 32; j += booth_unit_size)
3934 i >>= booth_unit_size;
3935 cost += 2;
3938 *total = cost;
3939 return true;
3942 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3943 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3944 return true;
3946 default:
3947 *total = arm_rtx_costs_1 (x, code, outer_code);
3948 return true;
3953 /* RTX cost for XScale CPUs. */
3955 static bool
3956 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
3958 enum machine_mode mode = GET_MODE (x);
3960 if (TARGET_THUMB)
3962 *total = thumb_rtx_costs (x, code, outer_code);
3963 return true;
3966 switch (code)
3968 case MULT:
3969 /* There is no point basing this on the tuning, since it is always the
3970 fast variant if it exists at all. */
3971 if (mode == DImode
3972 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3973 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3974 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3976 *total = 8;
3977 return true;
3981 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3982 || mode == DImode)
3984 *total = 30;
3985 return true;
3988 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3990 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3991 & (unsigned HOST_WIDE_INT) 0xffffffff);
3992 int cost, const_ok = const_ok_for_arm (i);
3993 unsigned HOST_WIDE_INT masked_const;
3995 /* The cost will be related to two insns.
3996 First a load of the constant (MOV or LDR), then a multiply. */
3997 cost = 2;
3998 if (! const_ok)
3999 cost += 1; /* LDR is probably more expensive because
4000 of longer result latency. */
4001 masked_const = i & 0xffff8000;
4002 if (masked_const != 0 && masked_const != 0xffff8000)
4004 masked_const = i & 0xf8000000;
4005 if (masked_const == 0 || masked_const == 0xf8000000)
4006 cost += 1;
4007 else
4008 cost += 2;
4010 *total = cost;
4011 return true;
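      /* Worked example (expository): the masked_const tests above
         presumably model XScale early termination -- a multiplier
         fitting in roughly 16 or 28 signed bits retires sooner.  For
         0x4000 (encodable, and 0x4000 & 0xffff8000 == 0) *total stays
         at 2.  For 0x12345678 the constant needs an LDR (cost 3), and
         0x12345678 & 0xf8000000 == 0x10000000 is neither all-zeros
         nor all-ones, so 2 more are added, giving *total = 5.  */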
4014 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
4015 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
4016 return true;
4018 default:
4019 *total = arm_rtx_costs_1 (x, code, outer_code);
4020 return true;
4025 /* RTX costs for 9e (and later) cores. */
4027 static bool
4028 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
4030 enum machine_mode mode = GET_MODE (x);
4031 int nonreg_cost;
4032 int cost;
4034 if (TARGET_THUMB)
4036 switch (code)
4038 case MULT:
4039 *total = COSTS_N_INSNS (3);
4040 return true;
4042 default:
4043 *total = thumb_rtx_costs (x, code, outer_code);
4044 return true;
4048 switch (code)
4050 case MULT:
4051 /* There is no point basing this on the tuning, since it is always the
4052 fast variant if it exists at all. */
4053 if (mode == DImode
4054 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
4055 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
4056 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
4058 *total = 3;
4059 return true;
4063 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4065 *total = 30;
4066 return true;
4068 if (mode == DImode)
4070 cost = 7;
4071 nonreg_cost = 8;
4073 else
4075 cost = 2;
4076 nonreg_cost = 4;
4080 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
4081 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
4082 return true;
4084 default:
4085 *total = arm_rtx_costs_1 (x, code, outer_code);
4086 return true;
4089 /* All address computations that can be done are free, but rtx cost returns
4090 the same for practically all of them. So we weight the different types
4091 of address here in the order (most preferred first):
4092 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
4093 static inline int
4094 arm_arm_address_cost (rtx x)
4096 enum rtx_code c = GET_CODE (x);
4098 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
4099 return 0;
4100 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
4101 return 10;
4103 if (c == PLUS || c == MINUS)
4105 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
4106 return 2;
4108 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
4109 return 3;
4111 return 4;
4114 return 6;
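/* Examples (expository, not from the original sources):
   (post_inc r0) costs 0; a LABEL_REF or SYMBOL_REF address costs 10;
   (plus r0 (mult r2 4)) -- a shifted index -- costs 3;
   (plus r0 (const_int 8)) costs 4; and a plain (reg r0) falls through
   to the default cost of 6.  Lower values are preferred.  */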
4117 static inline int
4118 arm_thumb_address_cost (rtx x)
4120 enum rtx_code c = GET_CODE (x);
4122 if (c == REG)
4123 return 1;
4124 if (c == PLUS
4125 && GET_CODE (XEXP (x, 0)) == REG
4126 && GET_CODE (XEXP (x, 1)) == CONST_INT)
4127 return 1;
4129 return 2;
4132 static int
4133 arm_address_cost (rtx x)
4135 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
4138 static int
4139 arm_use_dfa_pipeline_interface (void)
4141 return true;
4144 static int
4145 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
4147 rtx i_pat, d_pat;
4149 /* Some true dependencies can have a higher cost depending
4150 on precisely how certain input operands are used. */
4151 if (arm_tune_xscale
4152 && REG_NOTE_KIND (link) == 0
4153 && recog_memoized (insn) >= 0
4154 && recog_memoized (dep) >= 0)
4156 int shift_opnum = get_attr_shift (insn);
4157 enum attr_type attr_type = get_attr_type (dep);
4159 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4160 operand for INSN. If we have a shifted input operand and the
4161 instruction we depend on is another ALU instruction, then we may
4162 have to account for an additional stall. */
4163 if (shift_opnum != 0
4164 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4166 rtx shifted_operand;
4167 int opno;
4169 /* Get the shifted operand. */
4170 extract_insn (insn);
4171 shifted_operand = recog_data.operand[shift_opnum];
4173 /* Iterate over all the operands in DEP. If we write an operand
4174 that overlaps with SHIFTED_OPERAND, then we have to increase the
4175 cost of this dependency. */
4176 extract_insn (dep);
4177 preprocess_constraints ();
4178 for (opno = 0; opno < recog_data.n_operands; opno++)
4180 /* We can ignore strict inputs. */
4181 if (recog_data.operand_type[opno] == OP_IN)
4182 continue;
4184 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4185 shifted_operand))
4186 return 2;
4191 /* XXX This is not strictly true for the FPA. */
4192 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4193 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4194 return 0;
4196 /* Call insns don't incur a stall, even if they follow a load. */
4197 if (REG_NOTE_KIND (link) == 0
4198 && GET_CODE (insn) == CALL_INSN)
4199 return 1;
4201 if ((i_pat = single_set (insn)) != NULL
4202 && GET_CODE (SET_SRC (i_pat)) == MEM
4203 && (d_pat = single_set (dep)) != NULL
4204 && GET_CODE (SET_DEST (d_pat)) == MEM)
4206 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4207 /* This is a load after a store, there is no conflict if the load reads
4208 from a cached area. Assume that loads from the stack and from the
4209 constant pool are cached, and that others will miss. This is a
4210 hack. */
4212 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4213 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4214 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4215 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4216 return 1;
4219 return cost;
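/* Example (expository sketch): on XScale, in

     mov  r2, r3, asl #2     @ DEP, an ALU-shift insn writing r2
     add  r0, r1, r2, lsl #1 @ INSN, shifting its input r2

   the shifted input r2 overlaps an output of DEP, so the loop above
   raises the dependency cost to 2.  Conversely, a load that follows a
   store but reads from the constant pool or through the stack or
   frame pointer is assumed to hit the cache and costs only 1.  */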
4222 static int fp_consts_inited = 0;
4224 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4225 static const char * const strings_fp[8] =
4227 "0", "1", "2", "3",
4228 "4", "5", "0.5", "10"
4231 static REAL_VALUE_TYPE values_fp[8];
4233 static void
4234 init_fp_table (void)
4236 int i;
4237 REAL_VALUE_TYPE r;
4239 if (TARGET_VFP)
4240 fp_consts_inited = 1;
4241 else
4242 fp_consts_inited = 8;
4244 for (i = 0; i < fp_consts_inited; i++)
4246 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4247 values_fp[i] = r;
4251 /* Return TRUE if rtx X is a valid immediate FP constant. */
4253 arm_const_double_rtx (rtx x)
4255 REAL_VALUE_TYPE r;
4256 int i;
4258 if (!fp_consts_inited)
4259 init_fp_table ();
4261 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4262 if (REAL_VALUE_MINUS_ZERO (r))
4263 return 0;
4265 for (i = 0; i < fp_consts_inited; i++)
4266 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4267 return 1;
4269 return 0;
4272 /* Return TRUE if rtx X is a valid immediate FPA constant when negated. */
4274 neg_const_double_rtx_ok_for_fpa (rtx x)
4276 REAL_VALUE_TYPE r;
4277 int i;
4279 if (!fp_consts_inited)
4280 init_fp_table ();
4282 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4283 r = REAL_VALUE_NEGATE (r);
4284 if (REAL_VALUE_MINUS_ZERO (r))
4285 return 0;
4287 for (i = 0; i < 8; i++)
4288 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4289 return 1;
4291 return 0;
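/* Worked example (expository): for the CONST_DOUBLE -1.0 the negated
   value 1.0 is in values_fp[], so this returns 1 and the FPA's
   negated-operand forms (e.g. CNF for a compare) can be used; +1.0
   itself is matched by arm_const_double_rtx instead.  */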
4294 /* Predicates for `match_operand' and `match_operator'. */
4296 /* s_register_operand is the same as register_operand, but it doesn't accept
4297 (SUBREG (MEM)...).
4299 This function exists because, at the time it was added, it led to better
4300 code. SUBREG(MEM) always needs a reload in the places where
4301 s_register_operand is used, and this seemed to lead to excessive
4302 reloading. */
4304 s_register_operand (rtx op, enum machine_mode mode)
4306 if (GET_MODE (op) != mode && mode != VOIDmode)
4307 return 0;
4309 if (GET_CODE (op) == SUBREG)
4310 op = SUBREG_REG (op);
4312 /* We don't consider registers whose class is NO_REGS
4313 to be a register operand. */
4314 /* XXX might have to check for lo regs only for thumb ??? */
4315 return (GET_CODE (op) == REG
4316 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4317 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4320 /* A hard register operand (even before reload). */
4322 arm_hard_register_operand (rtx op, enum machine_mode mode)
4324 if (GET_MODE (op) != mode && mode != VOIDmode)
4325 return 0;
4327 return (GET_CODE (op) == REG
4328 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4331 /* An arm register operand. */
4333 arm_general_register_operand (rtx op, enum machine_mode mode)
4335 if (GET_MODE (op) != mode && mode != VOIDmode)
4336 return 0;
4338 if (GET_CODE (op) == SUBREG)
4339 op = SUBREG_REG (op);
4341 return (GET_CODE (op) == REG
4342 && (REGNO (op) <= LAST_ARM_REGNUM
4343 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4346 /* Only accept reg, subreg(reg), const_int. */
4348 reg_or_int_operand (rtx op, enum machine_mode mode)
4350 if (GET_CODE (op) == CONST_INT)
4351 return 1;
4353 if (GET_MODE (op) != mode && mode != VOIDmode)
4354 return 0;
4356 if (GET_CODE (op) == SUBREG)
4357 op = SUBREG_REG (op);
4359 /* We don't consider registers whose class is NO_REGS
4360 to be a register operand. */
4361 return (GET_CODE (op) == REG
4362 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4363 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4366 /* Return 1 if OP is an item in memory, given that we are in reload. */
4368 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4370 int regno = true_regnum (op);
4372 return (!CONSTANT_P (op)
4373 && (regno == -1
4374 || (GET_CODE (op) == REG
4375 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4378 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4380 arm_rhs_operand (rtx op, enum machine_mode mode)
4382 return (s_register_operand (op, mode)
4383 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4386 /* Return TRUE for valid operands for the
4387 rhs of an ARM instruction, or a load. */
4389 arm_rhsm_operand (rtx op, enum machine_mode mode)
4391 return (s_register_operand (op, mode)
4392 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4393 || memory_operand (op, mode));
4396 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4397 constant that is valid when negated. */
4399 arm_add_operand (rtx op, enum machine_mode mode)
4401 if (TARGET_THUMB)
4402 return thumb_cmp_operand (op, mode);
4404 return (s_register_operand (op, mode)
4405 || (GET_CODE (op) == CONST_INT
4406 && (const_ok_for_arm (INTVAL (op))
4407 || const_ok_for_arm (-INTVAL (op)))));
4410 /* Return TRUE for valid ARM constants (or when valid if negated). */
4412 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4414 return (GET_CODE (op) == CONST_INT
4415 && (const_ok_for_arm (INTVAL (op))
4416 || const_ok_for_arm (-INTVAL (op))));
4420 arm_not_operand (rtx op, enum machine_mode mode)
4422 return (s_register_operand (op, mode)
4423 || (GET_CODE (op) == CONST_INT
4424 && (const_ok_for_arm (INTVAL (op))
4425 || const_ok_for_arm (~INTVAL (op)))));
4428 /* Return TRUE if the operand is a memory reference which contains an
4429 offsettable address. */
4431 offsettable_memory_operand (rtx op, enum machine_mode mode)
4433 if (mode == VOIDmode)
4434 mode = GET_MODE (op);
4436 return (mode == GET_MODE (op)
4437 && GET_CODE (op) == MEM
4438 && offsettable_address_p (reload_completed | reload_in_progress,
4439 mode, XEXP (op, 0)));
4442 /* Return TRUE if the operand is a memory reference which is, or can be
4443 made word aligned by adjusting the offset. */
4445 alignable_memory_operand (rtx op, enum machine_mode mode)
4447 rtx reg;
4449 if (mode == VOIDmode)
4450 mode = GET_MODE (op);
4452 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4453 return 0;
4455 op = XEXP (op, 0);
4457 return ((GET_CODE (reg = op) == REG
4458 || (GET_CODE (op) == SUBREG
4459 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4460 || (GET_CODE (op) == PLUS
4461 && GET_CODE (XEXP (op, 1)) == CONST_INT
4462 && (GET_CODE (reg = XEXP (op, 0)) == REG
4463 || (GET_CODE (XEXP (op, 0)) == SUBREG
4464 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4465 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4468 /* Similar to s_register_operand, but does not allow hard integer
4469 registers. */
4471 f_register_operand (rtx op, enum machine_mode mode)
4473 if (GET_MODE (op) != mode && mode != VOIDmode)
4474 return 0;
4476 if (GET_CODE (op) == SUBREG)
4477 op = SUBREG_REG (op);
4479 /* We don't consider registers whose class is NO_REGS
4480 to be a register operand. */
4481 return (GET_CODE (op) == REG
4482 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4483 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4486 /* Return TRUE for valid operands for the rhs of a floating point insn.
4487 Allows regs or certain consts on FPA, just regs for everything else. */
4489 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4491 if (s_register_operand (op, mode))
4492 return TRUE;
4494 if (GET_MODE (op) != mode && mode != VOIDmode)
4495 return FALSE;
4497 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4498 return arm_const_double_rtx (op);
4500 return FALSE;
4504 arm_float_add_operand (rtx op, enum machine_mode mode)
4506 if (s_register_operand (op, mode))
4507 return TRUE;
4509 if (GET_MODE (op) != mode && mode != VOIDmode)
4510 return FALSE;
4512 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4513 return (arm_const_double_rtx (op)
4514 || neg_const_double_rtx_ok_for_fpa (op));
4516 return FALSE;
4520 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4521 Depends on which FPU we are targeting. */
4524 arm_float_compare_operand (rtx op, enum machine_mode mode)
4526 if (TARGET_VFP)
4527 return vfp_compare_operand (op, mode);
4528 else
4529 return arm_float_rhs_operand (op, mode);
4533 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4535 cirrus_memory_offset (rtx op)
4537 /* Reject eliminable registers. */
4538 if (! (reload_in_progress || reload_completed)
4539 && ( reg_mentioned_p (frame_pointer_rtx, op)
4540 || reg_mentioned_p (arg_pointer_rtx, op)
4541 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4542 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4543 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4544 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4545 return 0;
4547 if (GET_CODE (op) == MEM)
4549 rtx ind;
4551 ind = XEXP (op, 0);
4553 /* Match: (mem (reg)). */
4554 if (GET_CODE (ind) == REG)
4555 return 1;
4557 /* Match:
4558 (mem (plus (reg)
4559 (const))). */
4560 if (GET_CODE (ind) == PLUS
4561 && GET_CODE (XEXP (ind, 0)) == REG
4562 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4563 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4564 return 1;
4567 return 0;
4571 arm_extendqisi_mem_op (rtx op, enum machine_mode mode)
4573 if (!memory_operand (op, mode))
4574 return 0;
4576 return arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, 0);
4579 /* Return nonzero if OP is a Cirrus or general register. */
4581 cirrus_register_operand (rtx op, enum machine_mode mode)
4583 if (GET_MODE (op) != mode && mode != VOIDmode)
4584 return FALSE;
4586 if (GET_CODE (op) == SUBREG)
4587 op = SUBREG_REG (op);
4589 return (GET_CODE (op) == REG
4590 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4591 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4594 /* Return nonzero if OP is a Cirrus FP register. */
4596 cirrus_fp_register (rtx op, enum machine_mode mode)
4598 if (GET_MODE (op) != mode && mode != VOIDmode)
4599 return FALSE;
4601 if (GET_CODE (op) == SUBREG)
4602 op = SUBREG_REG (op);
4604 return (GET_CODE (op) == REG
4605 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4606 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4609 /* Return nonzero if OP is a 6-bit constant (0..63). */
4611 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4613 return (GET_CODE (op) == CONST_INT
4614 && INTVAL (op) >= 0
4615 && INTVAL (op) < 64);
4619 /* Return TRUE if OP is a valid VFP memory address pattern. */
4620 /* Copied from cirrus_memory_offset but with restricted offset range. */
4623 vfp_mem_operand (rtx op)
4625 /* Reject eliminable registers. */
4627 if (! (reload_in_progress || reload_completed)
4628 && ( reg_mentioned_p (frame_pointer_rtx, op)
4629 || reg_mentioned_p (arg_pointer_rtx, op)
4630 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4631 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4632 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4633 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4634 return FALSE;
4636 /* Constants are converted into offsets from labels. */
4637 if (GET_CODE (op) == MEM)
4639 rtx ind;
4641 ind = XEXP (op, 0);
4643 if (reload_completed
4644 && (GET_CODE (ind) == LABEL_REF
4645 || (GET_CODE (ind) == CONST
4646 && GET_CODE (XEXP (ind, 0)) == PLUS
4647 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4648 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4649 return TRUE;
4651 /* Match: (mem (reg)). */
4652 if (GET_CODE (ind) == REG)
4653 return arm_address_register_rtx_p (ind, 0);
4655 /* Match:
4656 (mem (plus (reg)
4657 (const))). */
4658 if (GET_CODE (ind) == PLUS
4659 && GET_CODE (XEXP (ind, 0)) == REG
4660 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4661 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4662 && INTVAL (XEXP (ind, 1)) > -1024
4663 && INTVAL (XEXP (ind, 1)) < 1024)
4664 return TRUE;
4667 return FALSE;
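/* Examples (expository): (mem (reg r4)) is accepted, as is
   (mem (plus r4 (const_int 1020))) since the offset lies strictly
   between -1024 and 1024; (mem (plus r4 (const_int 1024))) is
   rejected, and so is any reg+reg or auto-increment address.  */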
4671 /* Return TRUE if OP is a REG or constant zero. */
4673 vfp_compare_operand (rtx op, enum machine_mode mode)
4675 if (s_register_operand (op, mode))
4676 return TRUE;
4678 return (GET_CODE (op) == CONST_DOUBLE
4679 && arm_const_double_rtx (op));
4683 /* Return GENERAL_REGS if a scratch register is required to reload X to/from
4684 VFP registers. Otherwise return NO_REGS. */
4686 enum reg_class
4687 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4689 if (vfp_mem_operand (x) || s_register_operand (x, mode))
4690 return NO_REGS;
4692 return GENERAL_REGS;
4696 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4697 Used by the Cirrus Maverick code, which has to work around
4698 a hardware bug triggered by such instructions. */
4699 static bool
4700 arm_memory_load_p (rtx insn)
4702 rtx body, lhs, rhs;
4704 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4705 return false;
4707 body = PATTERN (insn);
4709 if (GET_CODE (body) != SET)
4710 return false;
4712 lhs = XEXP (body, 0);
4713 rhs = XEXP (body, 1);
4715 lhs = REG_OR_SUBREG_RTX (lhs);
4717 /* If the destination is not a general purpose
4718 register we do not have to worry. */
4719 if (GET_CODE (lhs) != REG
4720 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4721 return false;
4723 /* As well as loads from memory we also have to react
4724 to loads of invalid constants which will be turned
4725 into loads from the minipool. */
4726 return (GET_CODE (rhs) == MEM
4727 || GET_CODE (rhs) == SYMBOL_REF
4728 || note_invalid_constants (insn, -1, false));
4731 /* Return TRUE if INSN is a Cirrus instruction. */
4732 static bool
4733 arm_cirrus_insn_p (rtx insn)
4735 enum attr_cirrus attr;
4737 /* get_attr aborts on USE and CLOBBER. */
4738 if (!insn
4739 || GET_CODE (insn) != INSN
4740 || GET_CODE (PATTERN (insn)) == USE
4741 || GET_CODE (PATTERN (insn)) == CLOBBER)
4742 return 0;
4744 attr = get_attr_cirrus (insn);
4746 return attr != CIRRUS_NOT;
4749 /* Cirrus reorg for invalid instruction combinations. */
4750 static void
4751 cirrus_reorg (rtx first)
4753 enum attr_cirrus attr;
4754 rtx body = PATTERN (first);
4755 rtx t;
4756 int nops;
4758 /* Any branch must be followed by 2 non-Cirrus instructions. */
4759 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4761 nops = 0;
4762 t = next_nonnote_insn (first);
4764 if (arm_cirrus_insn_p (t))
4765 ++ nops;
4767 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4768 ++ nops;
4770 while (nops --)
4771 emit_insn_after (gen_nop (), first);
4773 return;
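      /* Example (expository sketch):

	   b	.L2
	   cfadd32	mvfx0, mvfx1, mvfx2
	   cfsub32	mvfx3, mvfx4, mvfx5

	 Both insns after the branch are Cirrus insns, so two NOPs are
	 emitted immediately after the branch to keep them apart.  */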
4776 /* (float (blah)) is in parallel with a clobber. */
4777 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4778 body = XVECEXP (body, 0, 0);
4780 if (GET_CODE (body) == SET)
4782 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4784 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4785 be followed by a non-Cirrus insn. */
4786 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4788 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4789 emit_insn_after (gen_nop (), first);
4791 return;
4793 else if (arm_memory_load_p (first))
4795 unsigned int arm_regno;
4797 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4798 ldr/cfmv64hr combination where the Rd field is the same
4799 in both instructions must be split with a non-Cirrus
4800 insn. Example:
4802 ldr r0, blah
4804 cfmvsr mvf0, r0. */
4806 /* Get Arm register number for ldr insn. */
4807 if (GET_CODE (lhs) == REG)
4808 arm_regno = REGNO (lhs);
4809 else if (GET_CODE (rhs) == REG)
4810 arm_regno = REGNO (rhs);
4811 else
4812 abort ();
4814 /* Next insn. */
4815 first = next_nonnote_insn (first);
4817 if (! arm_cirrus_insn_p (first))
4818 return;
4820 body = PATTERN (first);
4822 /* (float (blah)) is in parallel with a clobber. */
4823 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4824 body = XVECEXP (body, 0, 0);
4826 if (GET_CODE (body) == FLOAT)
4827 body = XEXP (body, 0);
4829 if (get_attr_cirrus (first) == CIRRUS_MOVE
4830 && GET_CODE (XEXP (body, 1)) == REG
4831 && arm_regno == REGNO (XEXP (body, 1)))
4832 emit_insn_after (gen_nop (), first);
4834 return;
4838 /* get_attr aborts on USE and CLOBBER. */
4839 if (!first
4840 || GET_CODE (first) != INSN
4841 || GET_CODE (PATTERN (first)) == USE
4842 || GET_CODE (PATTERN (first)) == CLOBBER)
4843 return;
4845 attr = get_attr_cirrus (first);
4847 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4848 must be followed by a non-coprocessor instruction. */
4849 if (attr == CIRRUS_COMPARE)
4851 nops = 0;
4853 t = next_nonnote_insn (first);
4855 if (arm_cirrus_insn_p (t))
4856 ++ nops;
4858 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4859 ++ nops;
4861 while (nops --)
4862 emit_insn_after (gen_nop (), first);
4864 return;
4868 /* Return nonzero if OP is a constant power of two. */
4870 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4872 if (GET_CODE (op) == CONST_INT)
4874 HOST_WIDE_INT value = INTVAL (op);
4876 return value != 0 && (value & (value - 1)) == 0;
4879 return FALSE;
4882 /* Return TRUE for a valid operand of a DImode operation.
4883 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4884 Note that this disallows MEM(REG+REG), but allows
4885 MEM(PRE/POST_INC/DEC(REG)). */
4887 di_operand (rtx op, enum machine_mode mode)
4889 if (s_register_operand (op, mode))
4890 return TRUE;
4892 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4893 return FALSE;
4895 if (GET_CODE (op) == SUBREG)
4896 op = SUBREG_REG (op);
4898 switch (GET_CODE (op))
4900 case CONST_DOUBLE:
4901 case CONST_INT:
4902 return TRUE;
4904 case MEM:
4905 return memory_address_p (DImode, XEXP (op, 0));
4907 default:
4908 return FALSE;
4912 /* Like di_operand, but don't accept constants. */
4914 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4916 if (s_register_operand (op, mode))
4917 return TRUE;
4919 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4920 return FALSE;
4922 if (GET_CODE (op) == SUBREG)
4923 op = SUBREG_REG (op);
4925 if (GET_CODE (op) == MEM)
4926 return memory_address_p (DImode, XEXP (op, 0));
4928 return FALSE;
4931 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
4932 Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
4933 Note that this disallows MEM(REG+REG), but allows
4934 MEM(PRE/POST_INC/DEC(REG)). */
4936 soft_df_operand (rtx op, enum machine_mode mode)
4938 if (s_register_operand (op, mode))
4939 return TRUE;
4941 if (mode != VOIDmode && GET_MODE (op) != mode)
4942 return FALSE;
4944 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4945 return FALSE;
4947 if (GET_CODE (op) == SUBREG)
4948 op = SUBREG_REG (op);
4950 switch (GET_CODE (op))
4952 case CONST_DOUBLE:
4953 return TRUE;
4955 case MEM:
4956 return memory_address_p (DFmode, XEXP (op, 0));
4958 default:
4959 return FALSE;
4963 /* Like soft_df_operand, but don't accept constants. */
4965 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4967 if (s_register_operand (op, mode))
4968 return TRUE;
4970 if (mode != VOIDmode && GET_MODE (op) != mode)
4971 return FALSE;
4973 if (GET_CODE (op) == SUBREG)
4974 op = SUBREG_REG (op);
4976 if (GET_CODE (op) == MEM)
4977 return memory_address_p (DFmode, XEXP (op, 0));
4978 return FALSE;
4981 /* Return TRUE for valid index operands. */
4983 index_operand (rtx op, enum machine_mode mode)
4985 return (s_register_operand (op, mode)
4986 || (immediate_operand (op, mode)
4987 && (GET_CODE (op) != CONST_INT
4988 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4991 /* Return TRUE for valid shifts by a constant. This also accepts any
4992 power of two on the (somewhat overly relaxed) assumption that the
4993 shift operator in this case was a mult. */
4995 const_shift_operand (rtx op, enum machine_mode mode)
4997 return (power_of_two_operand (op, mode)
4998 || (immediate_operand (op, mode)
4999 && (GET_CODE (op) != CONST_INT
5000 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
5003 /* Return TRUE for arithmetic operators which can be combined with a multiply
5004 (shift). */
5006 shiftable_operator (rtx x, enum machine_mode mode)
5008 enum rtx_code code;
5010 if (GET_MODE (x) != mode)
5011 return FALSE;
5013 code = GET_CODE (x);
5015 return (code == PLUS || code == MINUS
5016 || code == IOR || code == XOR || code == AND);
5019 /* Return TRUE for binary logical operators. */
5021 logical_binary_operator (rtx x, enum machine_mode mode)
5023 enum rtx_code code;
5025 if (GET_MODE (x) != mode)
5026 return FALSE;
5028 code = GET_CODE (x);
5030 return (code == IOR || code == XOR || code == AND);
5033 /* Return TRUE for shift operators. */
5035 shift_operator (rtx x, enum machine_mode mode)
5037 enum rtx_code code;
5039 if (GET_MODE (x) != mode)
5040 return FALSE;
5042 code = GET_CODE (x);
5044 if (code == MULT)
5045 return power_of_two_operand (XEXP (x, 1), mode);
5047 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
5048 || code == ROTATERT);
5051 /* Return TRUE if x is EQ or NE. */
5053 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
5055 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
5058 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
5060 arm_comparison_operator (rtx x, enum machine_mode mode)
5062 return (comparison_operator (x, mode)
5063 && GET_CODE (x) != LTGT
5064 && GET_CODE (x) != UNEQ);
5067 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
5069 minmax_operator (rtx x, enum machine_mode mode)
5071 enum rtx_code code = GET_CODE (x);
5073 if (GET_MODE (x) != mode)
5074 return FALSE;
5076 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
5079 /* Return TRUE if this is the condition code register; if we aren't given
5080 a mode, accept any class CCmode register. */
5082 cc_register (rtx x, enum machine_mode mode)
5084 if (mode == VOIDmode)
5086 mode = GET_MODE (x);
5088 if (GET_MODE_CLASS (mode) != MODE_CC)
5089 return FALSE;
5092 if ( GET_MODE (x) == mode
5093 && GET_CODE (x) == REG
5094 && REGNO (x) == CC_REGNUM)
5095 return TRUE;
5097 return FALSE;
5100 /* Return TRUE if this is the condition code register; if we aren't given
5101 a mode, accept any class CCmode register which indicates a dominance
5102 expression. */
5104 dominant_cc_register (rtx x, enum machine_mode mode)
5106 if (mode == VOIDmode)
5108 mode = GET_MODE (x);
5110 if (GET_MODE_CLASS (mode) != MODE_CC)
5111 return FALSE;
5114 if (mode != CC_DNEmode && mode != CC_DEQmode
5115 && mode != CC_DLEmode && mode != CC_DLTmode
5116 && mode != CC_DGEmode && mode != CC_DGTmode
5117 && mode != CC_DLEUmode && mode != CC_DLTUmode
5118 && mode != CC_DGEUmode && mode != CC_DGTUmode)
5119 return FALSE;
5121 return cc_register (x, mode);
5124 /* Return TRUE if X references a SYMBOL_REF. */
5126 symbol_mentioned_p (rtx x)
5128 const char * fmt;
5129 int i;
5131 if (GET_CODE (x) == SYMBOL_REF)
5132 return 1;
5134 fmt = GET_RTX_FORMAT (GET_CODE (x));
5136 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5138 if (fmt[i] == 'E')
5140 int j;
5142 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5143 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5144 return 1;
5146 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5147 return 1;
5150 return 0;
5153 /* Return TRUE if X references a LABEL_REF. */
5155 label_mentioned_p (rtx x)
5157 const char * fmt;
5158 int i;
5160 if (GET_CODE (x) == LABEL_REF)
5161 return 1;
5163 fmt = GET_RTX_FORMAT (GET_CODE (x));
5164 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5166 if (fmt[i] == 'E')
5168 int j;
5170 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5171 if (label_mentioned_p (XVECEXP (x, i, j)))
5172 return 1;
5174 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5175 return 1;
5178 return 0;
5181 enum rtx_code
5182 minmax_code (rtx x)
5184 enum rtx_code code = GET_CODE (x);
5186 if (code == SMAX)
5187 return GE;
5188 else if (code == SMIN)
5189 return LE;
5190 else if (code == UMIN)
5191 return LEU;
5192 else if (code == UMAX)
5193 return GEU;
5195 abort ();
5198 /* Return 1 if memory locations are adjacent. */
5200 adjacent_mem_locations (rtx a, rtx b)
5202 if ((GET_CODE (XEXP (a, 0)) == REG
5203 || (GET_CODE (XEXP (a, 0)) == PLUS
5204 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5205 && (GET_CODE (XEXP (b, 0)) == REG
5206 || (GET_CODE (XEXP (b, 0)) == PLUS
5207 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5209 int val0 = 0, val1 = 0;
5210 int reg0, reg1;
5212 if (GET_CODE (XEXP (a, 0)) == PLUS)
5214 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5215 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5217 else
5218 reg0 = REGNO (XEXP (a, 0));
5220 if (GET_CODE (XEXP (b, 0)) == PLUS)
5222 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5223 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5225 else
5226 reg1 = REGNO (XEXP (b, 0));
5228 /* Don't accept any offset that will require multiple
5229 instructions to handle, since this would cause the
5230 arith_adjacentmem pattern to output an overlong sequence. */
5231 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5232 return 0;
5234 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5236 return 0;
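/* Examples (expository): (mem:SI (reg r3)) and (mem:SI (plus r3 4))
   are adjacent, and so are the same pair in descending order, since a
   difference of 4 either way is accepted; (mem:SI (reg r3)) and
   (mem:SI (plus r3 8)) are not, nor are references off different base
   registers.  */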
5239 /* Return 1 if OP is a load multiple operation. It is known to be
5240 a PARALLEL and the first section will be tested. */
5242 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5244 HOST_WIDE_INT count = XVECLEN (op, 0);
5245 int dest_regno;
5246 rtx src_addr;
5247 HOST_WIDE_INT i = 1, base = 0;
5248 rtx elt;
5250 if (count <= 1
5251 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5252 return 0;
5254 /* Check to see if this might be a write-back. */
5255 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5257 i++;
5258 base = 1;
5260 /* Now check it more carefully. */
5261 if (GET_CODE (SET_DEST (elt)) != REG
5262 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5263 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5264 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5265 return 0;
5268 /* Perform a quick check so we don't blow up below. */
5269 if (count <= i
5270 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5271 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5272 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5273 return 0;
5275 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5276 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5278 for (; i < count; i++)
5280 elt = XVECEXP (op, 0, i);
5282 if (GET_CODE (elt) != SET
5283 || GET_CODE (SET_DEST (elt)) != REG
5284 || GET_MODE (SET_DEST (elt)) != SImode
5285 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5286 || GET_CODE (SET_SRC (elt)) != MEM
5287 || GET_MODE (SET_SRC (elt)) != SImode
5288 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5289 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5290 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5291 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5292 return 0;
5295 return 1;
5298 /* Return 1 if OP is a store multiple operation. It is known to be
5299 a PARALLEL and the first section will be tested. */
5301 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5303 HOST_WIDE_INT count = XVECLEN (op, 0);
5304 int src_regno;
5305 rtx dest_addr;
5306 HOST_WIDE_INT i = 1, base = 0;
5307 rtx elt;
5309 if (count <= 1
5310 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5311 return 0;
5313 /* Check to see if this might be a write-back. */
5314 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5316 i++;
5317 base = 1;
5319 /* Now check it more carefully. */
5320 if (GET_CODE (SET_DEST (elt)) != REG
5321 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5322 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5323 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5324 return 0;
5327 /* Perform a quick check so we don't blow up below. */
5328 if (count <= i
5329 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5330 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5331 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5332 return 0;
5334 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5335 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5337 for (; i < count; i++)
5339 elt = XVECEXP (op, 0, i);
5341 if (GET_CODE (elt) != SET
5342 || GET_CODE (SET_SRC (elt)) != REG
5343 || GET_MODE (SET_SRC (elt)) != SImode
5344 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5345 || GET_CODE (SET_DEST (elt)) != MEM
5346 || GET_MODE (SET_DEST (elt)) != SImode
5347 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5348 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5349 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5350 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5351 return 0;
5354 return 1;
5358 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5359 HOST_WIDE_INT *load_offset)
5361 int unsorted_regs[4];
5362 HOST_WIDE_INT unsorted_offsets[4];
5363 int order[4];
5364 int base_reg = -1;
5365 int i;
5367 /* Can only handle 2, 3, or 4 insns at present,
5368 though could be easily extended if required. */
5369 if (nops < 2 || nops > 4)
5370 abort ();
5372 /* Loop over the operands and check that the memory references are
5373 suitable (i.e. immediate offsets from the same base register). At
5374 the same time, extract the target register, and the memory
5375 offsets. */
5376 for (i = 0; i < nops; i++)
5378 rtx reg;
5379 rtx offset;
5381 /* Convert a subreg of a mem into the mem itself. */
5382 if (GET_CODE (operands[nops + i]) == SUBREG)
5383 operands[nops + i] = alter_subreg (operands + (nops + i));
5385 if (GET_CODE (operands[nops + i]) != MEM)
5386 abort ();
5388 /* Don't reorder volatile memory references; it doesn't seem worth
5389 looking for the case where the order is ok anyway. */
5390 if (MEM_VOLATILE_P (operands[nops + i]))
5391 return 0;
5393 offset = const0_rtx;
5395 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5396 || (GET_CODE (reg) == SUBREG
5397 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5398 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5399 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5400 == REG)
5401 || (GET_CODE (reg) == SUBREG
5402 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5403 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5404 == CONST_INT)))
5406 if (i == 0)
5408 base_reg = REGNO (reg);
5409 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5410 ? REGNO (operands[i])
5411 : REGNO (SUBREG_REG (operands[i])));
5412 order[0] = 0;
5414 else
5416 if (base_reg != (int) REGNO (reg))
5417 /* Not addressed from the same base register. */
5418 return 0;
5420 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5421 ? REGNO (operands[i])
5422 : REGNO (SUBREG_REG (operands[i])));
5423 if (unsorted_regs[i] < unsorted_regs[order[0]])
5424 order[0] = i;
5427 /* If it isn't an integer register, or if it overwrites the
5428 base register but isn't the last insn in the list, then
5429 we can't do this. */
5430 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5431 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5432 return 0;
5434 unsorted_offsets[i] = INTVAL (offset);
5436 else
5437 /* Not a suitable memory address. */
5438 return 0;
5441 /* All the useful information has now been extracted from the
5442 operands into unsorted_regs and unsorted_offsets; additionally,
5443 order[0] has been set to the lowest numbered register in the
5444 list. Sort the registers into order, and check that the memory
5445 offsets are ascending and adjacent. */
5447 for (i = 1; i < nops; i++)
5449 int j;
5451 order[i] = order[i - 1];
5452 for (j = 0; j < nops; j++)
5453 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5454 && (order[i] == order[i - 1]
5455 || unsorted_regs[j] < unsorted_regs[order[i]]))
5456 order[i] = j;
5458 /* Have we found a suitable register? If not, one must be used more
5459 than once. */
5460 if (order[i] == order[i - 1])
5461 return 0;
5463 /* Is the memory address adjacent and ascending? */
5464 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5465 return 0;
5468 if (base)
5470 *base = base_reg;
5472 for (i = 0; i < nops; i++)
5473 regs[i] = unsorted_regs[order[i]];
5475 *load_offset = unsorted_offsets[order[0]];
5478 if (unsorted_offsets[order[0]] == 0)
5479 return 1; /* ldmia */
5481 if (unsorted_offsets[order[0]] == 4)
5482 return 2; /* ldmib */
5484 if (unsorted_offsets[order[nops - 1]] == 0)
5485 return 3; /* ldmda */
5487 if (unsorted_offsets[order[nops - 1]] == -4)
5488 return 4; /* ldmdb */
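  /* Examples (expository): for four registers, sorted offsets
     {0, 4, 8, 12} give ldmia; {4, 8, 12, 16} give ldmib;
     {-12, -8, -4, 0} give ldmda (last offset 0); and
     {-16, -12, -8, -4} give ldmdb (last offset -4).  Other offsets
     fall through to the add/sub-plus-ldmia case 5 below, or fail.  */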
5490 /* For the ARM8, ARM9 and StrongARM, 2 ldr instructions are faster than an ldm
5491 if the offset isn't small enough. The reason 2 ldrs are faster
5492 is because these ARMs are able to do more than one cache access
5493 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5494 whilst the ARM8 has a double bandwidth cache. This means that
5495 these cores can do both an instruction fetch and a data fetch in
5496 a single cycle, so the trick of calculating the address into a
5497 scratch register (one of the result regs) and then doing a load
5498 multiple actually becomes slower (and no smaller in code size).
5499 That is the transformation
5501 ldr rd1, [rbase + offset]
5502 ldr rd2, [rbase + offset + 4]
5504 to
5506 add rd1, rbase, offset
5507 ldmia rd1, {rd1, rd2}
5509 produces worse code -- '3 cycles + any stalls on rd2' instead of
5510 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5511 access per cycle, the first sequence could never complete in less
5512 than 6 cycles, whereas the ldm sequence would only take 5 and
5513 would make better use of sequential accesses if not hitting the
5514 cache.
5516 We cheat here and test 'arm_ld_sched' which we currently know to
5517 only be true for the ARM8, ARM9 and StrongARM. If this ever
5518 changes, then the test below needs to be reworked. */
5519 if (nops == 2 && arm_ld_sched)
5520 return 0;
5522 /* Can't do it without setting up the offset; only do this if it takes
5523 no more than one insn. */
5524 return (const_ok_for_arm (unsorted_offsets[order[0]])
5525 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5528 const char *
5529 emit_ldm_seq (rtx *operands, int nops)
5531 int regs[4];
5532 int base_reg;
5533 HOST_WIDE_INT offset;
5534 char buf[100];
5535 int i;
5537 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5539 case 1:
5540 strcpy (buf, "ldm%?ia\t");
5541 break;
5543 case 2:
5544 strcpy (buf, "ldm%?ib\t");
5545 break;
5547 case 3:
5548 strcpy (buf, "ldm%?da\t");
5549 break;
5551 case 4:
5552 strcpy (buf, "ldm%?db\t");
5553 break;
5555 case 5:
5556 if (offset >= 0)
5557 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5558 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5559 (long) offset);
5560 else
5561 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5562 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5563 (long) -offset);
5564 output_asm_insn (buf, operands);
5565 base_reg = regs[0];
5566 strcpy (buf, "ldm%?ia\t");
5567 break;
5569 default:
5570 abort ();
5573 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5574 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5576 for (i = 1; i < nops; i++)
5577 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5578 reg_names[regs[i]]);
5580 strcat (buf, "}\t%@ phole ldm");
5582 output_asm_insn (buf, operands);
5583 return "";
5587 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5588 HOST_WIDE_INT * load_offset)
5590 int unsorted_regs[4];
5591 HOST_WIDE_INT unsorted_offsets[4];
5592 int order[4];
5593 int base_reg = -1;
5594 int i;
5596 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5597 extended if required. */
5598 if (nops < 2 || nops > 4)
5599 abort ();
5601 /* Loop over the operands and check that the memory references are
5602 suitable (i.e. immediate offsets from the same base register). At
5603 the same time, extract the target register, and the memory
5604 offsets. */
5605 for (i = 0; i < nops; i++)
5607 rtx reg;
5608 rtx offset;
5610 /* Convert a subreg of a mem into the mem itself. */
5611 if (GET_CODE (operands[nops + i]) == SUBREG)
5612 operands[nops + i] = alter_subreg (operands + (nops + i));
5614 if (GET_CODE (operands[nops + i]) != MEM)
5615 abort ();
5617 /* Don't reorder volatile memory references; it doesn't seem worth
5618 looking for the case where the order is ok anyway. */
5619 if (MEM_VOLATILE_P (operands[nops + i]))
5620 return 0;
5622 offset = const0_rtx;
5624 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5625 || (GET_CODE (reg) == SUBREG
5626 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5627 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5628 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5629 == REG)
5630 || (GET_CODE (reg) == SUBREG
5631 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5632 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5633 == CONST_INT)))
5635 if (i == 0)
5637 base_reg = REGNO (reg);
5638 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5639 ? REGNO (operands[i])
5640 : REGNO (SUBREG_REG (operands[i])));
5641 order[0] = 0;
5643 else
5645 if (base_reg != (int) REGNO (reg))
5646 /* Not addressed from the same base register. */
5647 return 0;
5649 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5650 ? REGNO (operands[i])
5651 : REGNO (SUBREG_REG (operands[i])));
5652 if (unsorted_regs[i] < unsorted_regs[order[0]])
5653 order[0] = i;
5656 /* If it isn't an integer register, then we can't do this. */
5657 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5658 return 0;
5660 unsorted_offsets[i] = INTVAL (offset);
5662 else
5663 /* Not a suitable memory address. */
5664 return 0;
5667 /* All the useful information has now been extracted from the
5668 operands into unsorted_regs and unsorted_offsets; additionally,
5669 order[0] has been set to the lowest numbered register in the
5670 list. Sort the registers into order, and check that the memory
5671 offsets are ascending and adjacent. */
5673 for (i = 1; i < nops; i++)
5675 int j;
5677 order[i] = order[i - 1];
5678 for (j = 0; j < nops; j++)
5679 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5680 && (order[i] == order[i - 1]
5681 || unsorted_regs[j] < unsorted_regs[order[i]]))
5682 order[i] = j;
5684 /* Have we found a suitable register? If not, one must be used more
5685 than once. */
5686 if (order[i] == order[i - 1])
5687 return 0;
5689 /* Is the memory address adjacent and ascending? */
5690 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5691 return 0;
5694 if (base)
5696 *base = base_reg;
5698 for (i = 0; i < nops; i++)
5699 regs[i] = unsorted_regs[order[i]];
5701 *load_offset = unsorted_offsets[order[0]];
5704 if (unsorted_offsets[order[0]] == 0)
5705 return 1; /* stmia */
5707 if (unsorted_offsets[order[0]] == 4)
5708 return 2; /* stmib */
5710 if (unsorted_offsets[order[nops - 1]] == 0)
5711 return 3; /* stmda */
5713 if (unsorted_offsets[order[nops - 1]] == -4)
5714 return 4; /* stmdb */
5716 return 0;
5719 const char *
5720 emit_stm_seq (rtx *operands, int nops)
5722 int regs[4];
5723 int base_reg;
5724 HOST_WIDE_INT offset;
5725 char buf[100];
5726 int i;
5728 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5730 case 1:
5731 strcpy (buf, "stm%?ia\t");
5732 break;
5734 case 2:
5735 strcpy (buf, "stm%?ib\t");
5736 break;
5738 case 3:
5739 strcpy (buf, "stm%?da\t");
5740 break;
5742 case 4:
5743 strcpy (buf, "stm%?db\t");
5744 break;
5746 default:
5747 abort ();
5750 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5751 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5753 for (i = 1; i < nops; i++)
5754 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5755 reg_names[regs[i]]);
5757 strcat (buf, "}\t%@ phole stm");
5759 output_asm_insn (buf, operands);
5760 return "";
5764 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5766 if (GET_CODE (op) != PARALLEL
5767 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5768 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5769 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5770 return 0;
5772 return 1;
5775 /* Routines for use in generating RTL. */
5778 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5779 int write_back, int unchanging_p, int in_struct_p,
5780 int scalar_p)
5782 int i = 0, j;
5783 rtx result;
5784 int sign = up ? 1 : -1;
5785 rtx mem;
5787 /* XScale has load-store double instructions, but they have stricter
5788 alignment requirements than load-store multiple, so we cannot
5789 use them.
5791 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5792 the pipeline until completion.
5794 NREGS CYCLES
5795 1 3
5796 2 4
5797 3 5
5798 4 6
5800 An ldr instruction takes 1-3 cycles, but does not block the
5801 pipeline.
5803 NREGS CYCLES
5804 1 1-3
5805 2 2-6
5806 3 3-9
5807 4 4-12
5809 In the best case ldr will always win. However, the more ldr instructions
5810 we issue, the less likely we are to be able to schedule them well.
5811 Using ldr instructions also increases code size.
5813 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5814 for counts of 3 or 4 regs. */
5815 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5817 rtx seq;
5819 start_sequence ();
5821 for (i = 0; i < count; i++)
5823 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5824 RTX_UNCHANGING_P (mem) = unchanging_p;
5825 MEM_IN_STRUCT_P (mem) = in_struct_p;
5826 MEM_SCALAR_P (mem) = scalar_p;
5827 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5830 if (write_back)
5831 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5833 seq = get_insns ();
5834 end_sequence ();
5836 return seq;
5839 result = gen_rtx_PARALLEL (VOIDmode,
5840 rtvec_alloc (count + (write_back ? 1 : 0)));
5841 if (write_back)
5843 XVECEXP (result, 0, 0)
5844 = gen_rtx_SET (GET_MODE (from), from,
5845 plus_constant (from, count * 4 * sign));
5846 i = 1;
5847 count++;
5850 for (j = 0; i < count; i++, j++)
5852 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5853 RTX_UNCHANGING_P (mem) = unchanging_p;
5854 MEM_IN_STRUCT_P (mem) = in_struct_p;
5855 MEM_SCALAR_P (mem) = scalar_p;
5856 XVECEXP (result, 0, i)
5857 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5860 return result;
5864 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5865 int write_back, int unchanging_p, int in_struct_p,
5866 int scalar_p)
5868 int i = 0, j;
5869 rtx result;
5870 int sign = up ? 1 : -1;
5871 rtx mem;
5873 /* See arm_gen_load_multiple for discussion of
5874 the pros/cons of ldm/stm usage for XScale. */
5875 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5877 rtx seq;
5879 start_sequence ();
5881 for (i = 0; i < count; i++)
5883 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5884 RTX_UNCHANGING_P (mem) = unchanging_p;
5885 MEM_IN_STRUCT_P (mem) = in_struct_p;
5886 MEM_SCALAR_P (mem) = scalar_p;
5887 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5890 if (write_back)
5891 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5893 seq = get_insns ();
5894 end_sequence ();
5896 return seq;
5899 result = gen_rtx_PARALLEL (VOIDmode,
5900 rtvec_alloc (count + (write_back ? 1 : 0)));
5901 if (write_back)
5903 XVECEXP (result, 0, 0)
5904 = gen_rtx_SET (GET_MODE (to), to,
5905 plus_constant (to, count * 4 * sign));
5906 i = 1;
5907 count++;
5910 for (j = 0; i < count; i++, j++)
5912 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5913 RTX_UNCHANGING_P (mem) = unchanging_p;
5914 MEM_IN_STRUCT_P (mem) = in_struct_p;
5915 MEM_SCALAR_P (mem) = scalar_p;
5917 XVECEXP (result, 0, i)
5918 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5921 return result;
5925 arm_gen_movstrqi (rtx *operands)
5927 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5928 int i;
5929 rtx src, dst;
5930 rtx st_src, st_dst, fin_src, fin_dst;
5931 rtx part_bytes_reg = NULL;
5932 rtx mem;
5933 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5934 int dst_scalar_p, src_scalar_p;
5936 if (GET_CODE (operands[2]) != CONST_INT
5937 || GET_CODE (operands[3]) != CONST_INT
5938 || INTVAL (operands[2]) > 64
5939 || INTVAL (operands[3]) & 3)
5940 return 0;
5942 st_dst = XEXP (operands[0], 0);
5943 st_src = XEXP (operands[1], 0);
5945 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5946 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5947 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5948 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5949 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5950 src_scalar_p = MEM_SCALAR_P (operands[1]);
5952 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5953 fin_src = src = copy_to_mode_reg (SImode, st_src);
5955 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5956 out_words_to_go = INTVAL (operands[2]) / 4;
5957 last_bytes = INTVAL (operands[2]) & 3;
5959 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5960 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5962 for (i = 0; in_words_to_go >= 2; i+=4)
5964 if (in_words_to_go > 4)
5965 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5966 src_unchanging_p,
5967 src_in_struct_p,
5968 src_scalar_p));
5969 else
5970 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5971 FALSE, src_unchanging_p,
5972 src_in_struct_p, src_scalar_p));
5974 if (out_words_to_go)
5976 if (out_words_to_go > 4)
5977 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5978 dst_unchanging_p,
5979 dst_in_struct_p,
5980 dst_scalar_p));
5981 else if (out_words_to_go != 1)
5982 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5983 dst, TRUE,
5984 (last_bytes == 0
5985 ? FALSE : TRUE),
5986 dst_unchanging_p,
5987 dst_in_struct_p,
5988 dst_scalar_p));
5989 else
5991 mem = gen_rtx_MEM (SImode, dst);
5992 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5993 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5994 MEM_SCALAR_P (mem) = dst_scalar_p;
5995 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5996 if (last_bytes != 0)
5997 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6001 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6002 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6005 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6006 if (out_words_to_go)
6008 rtx sreg;
6010 mem = gen_rtx_MEM (SImode, src);
6011 RTX_UNCHANGING_P (mem) = src_unchanging_p;
6012 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6013 MEM_SCALAR_P (mem) = src_scalar_p;
6014 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
6015 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
6017 mem = gen_rtx_MEM (SImode, dst);
6018 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6019 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6020 MEM_SCALAR_P (mem) = dst_scalar_p;
6021 emit_move_insn (mem, sreg);
6022 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
6023 in_words_to_go--;
6025 if (in_words_to_go) /* Sanity check */
6026 abort ();
6029 if (in_words_to_go)
6031 if (in_words_to_go < 0)
6032 abort ();
6034 mem = gen_rtx_MEM (SImode, src);
6035 RTX_UNCHANGING_P (mem) = src_unchanging_p;
6036 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
6037 MEM_SCALAR_P (mem) = src_scalar_p;
6038 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6041 if (last_bytes && part_bytes_reg == NULL)
6042 abort ();
6044 if (BYTES_BIG_ENDIAN && last_bytes)
6046 rtx tmp = gen_reg_rtx (SImode);
6048 /* The bytes we want are in the top end of the word. */
6049 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6050 GEN_INT (8 * (4 - last_bytes))));
6051 part_bytes_reg = tmp;
6053 while (last_bytes)
6055 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
6056 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6057 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6058 MEM_SCALAR_P (mem) = dst_scalar_p;
6059 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6061 if (--last_bytes)
6063 tmp = gen_reg_rtx (SImode);
6064 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6065 part_bytes_reg = tmp;
6070 else
6072 if (last_bytes > 1)
6074 mem = gen_rtx_MEM (HImode, dst);
6075 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6076 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6077 MEM_SCALAR_P (mem) = dst_scalar_p;
6078 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6079 last_bytes -= 2;
6080 if (last_bytes)
6082 rtx tmp = gen_reg_rtx (SImode);
6084 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6085 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6086 part_bytes_reg = tmp;
6090 if (last_bytes)
6092 mem = gen_rtx_MEM (QImode, dst);
6093 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
6094 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
6095 MEM_SCALAR_P (mem) = dst_scalar_p;
6096 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6100 return 1;
6103 /* Generate a memory reference for a half word, such that it will be loaded
6104 into the top 16 bits of the word. We can assume that the address is
6105 known to be alignable and of the form reg, or plus (reg, const). */
6108 arm_gen_rotated_half_load (rtx memref)
6110 HOST_WIDE_INT offset = 0;
6111 rtx base = XEXP (memref, 0);
6113 if (GET_CODE (base) == PLUS)
6115 offset = INTVAL (XEXP (base, 1));
6116 base = XEXP (base, 0);
6119 /* If we aren't allowed to generate unaligned addresses, then fail. */
6120 if (TARGET_MMU_TRAPS
6121 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
6122 return NULL;
6124 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
6126 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
6127 return base;
6129 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
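/* For instance, on a little-endian target a halfword at byte offset 6
   already occupies the top 16 bits of the word at offset 4, so the
   plain SImode load is returned as-is; a halfword at offset 4 sits in
   the low 16 bits, so the load is wrapped in a ROTATE by 16.  The
   BYTES_BIG_ENDIAN test flips which of the two cases needs the
   rotation.  */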
6132 /* Select a dominance comparison mode if possible for a test of the general
6133 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6134 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6135 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6136 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6137 In all cases OP will be either EQ or NE, but we don't need to know which
6138 here. If we are unable to support a dominance comparison we return
6139 CC mode. This will then fail to match for the RTL expressions that
6140 generate this call. */
6141 enum machine_mode
6142 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6144 enum rtx_code cond1, cond2;
6145 int swapped = 0;
6147 /* Currently we will probably get the wrong result if the individual
6148 comparisons are not simple. This also ensures that it is safe to
6149 reverse a comparison if necessary. */
6150 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6151 != CCmode)
6152 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6153 != CCmode))
6154 return CCmode;
6156 /* The if_then_else variant of this tests the second condition if the
6157 first passes, but is true if the first fails. Reverse the first
6158 condition to get a true "inclusive-or" expression. */
6159 if (cond_or == DOM_CC_NX_OR_Y)
6160 cond1 = reverse_condition (cond1);
6162 /* If the comparisons are not equal, and one doesn't dominate the other,
6163 then we can't do this. */
6164 if (cond1 != cond2
6165 && !comparison_dominates_p (cond1, cond2)
6166 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6167 return CCmode;
6169 if (swapped)
6171 enum rtx_code temp = cond1;
6172 cond1 = cond2;
6173 cond2 = temp;
6176 switch (cond1)
6178 case EQ:
6179 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6180 return CC_DEQmode;
6182 switch (cond2)
6184 case LE: return CC_DLEmode;
6185 case LEU: return CC_DLEUmode;
6186 case GE: return CC_DGEmode;
6187 case GEU: return CC_DGEUmode;
6188 default: break;
6191 break;
6193 case LT:
6194 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6195 return CC_DLTmode;
6196 if (cond2 == LE)
6197 return CC_DLEmode;
6198 if (cond2 == NE)
6199 return CC_DNEmode;
6200 break;
6202 case GT:
6203 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6204 return CC_DGTmode;
6205 if (cond2 == GE)
6206 return CC_DGEmode;
6207 if (cond2 == NE)
6208 return CC_DNEmode;
6209 break;
6211 case LTU:
6212 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6213 return CC_DLTUmode;
6214 if (cond2 == LEU)
6215 return CC_DLEUmode;
6216 if (cond2 == NE)
6217 return CC_DNEmode;
6218 break;
6220 case GTU:
6221 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6222 return CC_DGTUmode;
6223 if (cond2 == GEU)
6224 return CC_DGEUmode;
6225 if (cond2 == NE)
6226 return CC_DNEmode;
6227 break;
6229 /* The remaining cases only occur when both comparisons are the
6230 same. */
6231 case NE:
6232 return CC_DNEmode;
6234 case LE:
6235 return CC_DLEmode;
6237 case GE:
6238 return CC_DGEmode;
6240 case LEU:
6241 return CC_DLEUmode;
6243 case GEU:
6244 return CC_DGEUmode;
6246 default:
6247 break;
6250 abort ();
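/* As an illustration of the above: for a test of the form
   (x == y) || (x >= y), EQ is dominated by GE (x == y implies
   x >= y), so CC_DGEmode is returned and a single GE test suffices.
   If the operands arrive as GE first and EQ second, the swapped path
   exchanges them before the switch.  A pair such as LT and GEU mixes
   signed and unsigned orderings, neither dominates the other, and
   CCmode is returned so the pattern fails to match.  */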
6253 enum machine_mode
6254 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6256 /* All floating point compares return CCFP if it is an equality
6257 comparison, and CCFPE otherwise. */
6258 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6260 switch (op)
6262 case EQ:
6263 case NE:
6264 case UNORDERED:
6265 case ORDERED:
6266 case UNLT:
6267 case UNLE:
6268 case UNGT:
6269 case UNGE:
6270 case UNEQ:
6271 case LTGT:
6272 return CCFPmode;
6274 case LT:
6275 case LE:
6276 case GT:
6277 case GE:
6278 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6279 return CCFPmode;
6280 return CCFPEmode;
6282 default:
6283 abort ();
6287 /* A compare with a shifted operand. Because of canonicalization, the
6288 comparison will have to be swapped when we emit the assembler. */
6289 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6290 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6291 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6292 || GET_CODE (x) == ROTATERT))
6293 return CC_SWPmode;
6295 /* This is a special case that is used by combine to allow a
6296 comparison of a shifted byte load to be split into a zero-extend
6297 followed by a comparison of the shifted integer (only valid for
6298 equalities and unsigned inequalities). */
6299 if (GET_MODE (x) == SImode
6300 && GET_CODE (x) == ASHIFT
6301 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6302 && GET_CODE (XEXP (x, 0)) == SUBREG
6303 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6304 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6305 && (op == EQ || op == NE
6306 || op == GEU || op == GTU || op == LTU || op == LEU)
6307 && GET_CODE (y) == CONST_INT)
6308 return CC_Zmode;
6310 /* A construct for a conditional compare, if the false arm contains
6311 0, then both conditions must be true, otherwise either condition
6312 must be true. Not all conditions are possible, so CCmode is
6313 returned if it can't be done. */
6314 if (GET_CODE (x) == IF_THEN_ELSE
6315 && (XEXP (x, 2) == const0_rtx
6316 || XEXP (x, 2) == const1_rtx)
6317 && COMPARISON_P (XEXP (x, 0))
6318 && COMPARISON_P (XEXP (x, 1)))
6319 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6320 INTVAL (XEXP (x, 2)));
6322 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6323 if (GET_CODE (x) == AND
6324 && COMPARISON_P (XEXP (x, 0))
6325 && COMPARISON_P (XEXP (x, 1)))
6326 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6327 DOM_CC_X_AND_Y);
6329 if (GET_CODE (x) == IOR
6330 && COMPARISON_P (XEXP (x, 0))
6331 && COMPARISON_P (XEXP (x, 1)))
6332 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6333 DOM_CC_X_OR_Y);
6335 /* An operation (on Thumb) where we want to test for a single bit.
6336 This is done by shifting that bit up into the top bit of a
6337 scratch register; we can then branch on the sign bit. */
6338 if (TARGET_THUMB
6339 && GET_MODE (x) == SImode
6340 && (op == EQ || op == NE)
6341 && (GET_CODE (x) == ZERO_EXTRACT))
6342 return CC_Nmode;
6344 /* For an operation that sets the condition codes as a side-effect, the
6345 V flag is not set correctly, so we can only use comparisons where
6346 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6347 instead.) */
6348 if (GET_MODE (x) == SImode
6349 && y == const0_rtx
6350 && (op == EQ || op == NE || op == LT || op == GE)
6351 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6352 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6353 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6354 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6355 || GET_CODE (x) == LSHIFTRT
6356 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6357 || GET_CODE (x) == ROTATERT
6358 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6359 return CC_NOOVmode;
6361 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6362 return CC_Zmode;
6364 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6365 && GET_CODE (x) == PLUS
6366 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6367 return CC_Cmode;
6369 return CCmode;
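/* Some illustrative mappings (hypothetical operands): comparing a
   shift expression against a register yields CC_SWPmode, because the
   operands must be exchanged when the assembler is emitted; comparing
   (plus a b) against a with LTU or GEU yields CC_Cmode, the classic
   "a + b < a" unsigned-overflow test, which needs only the carry
   flag; and an SImode arithmetic result compared against zero with
   EQ, NE, LT or GE yields CC_NOOVmode, since such operations set N
   and Z but leave V meaningless (LT and GE then map to "mi" and
   "pl").  */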
6372 /* X and Y are two things to compare using CODE. Emit the compare insn and
6373 return the rtx for register 0 in the proper mode. FP means this is a
6374 floating point compare: I don't think that it is needed on the arm. */
6376 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6378 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6379 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6381 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6382 gen_rtx_COMPARE (mode, x, y)));
6384 return cc_reg;
6387 /* Generate a sequence of insns that will generate the correct return
6388 address mask depending on the physical architecture that the program
6389 is running on. */
6391 arm_gen_return_addr_mask (void)
6393 rtx reg = gen_reg_rtx (Pmode);
6395 emit_insn (gen_return_addr_mask (reg));
6396 return reg;
6399 void
6400 arm_reload_in_hi (rtx *operands)
6402 rtx ref = operands[1];
6403 rtx base, scratch;
6404 HOST_WIDE_INT offset = 0;
6406 if (GET_CODE (ref) == SUBREG)
6408 offset = SUBREG_BYTE (ref);
6409 ref = SUBREG_REG (ref);
6412 if (GET_CODE (ref) == REG)
6414 /* We have a pseudo which has been spilt onto the stack; there
6415 are two cases here: the first where there is a simple
6416 stack-slot replacement and a second where the stack-slot is
6417 out of range, or is used as a subreg. */
6418 if (reg_equiv_mem[REGNO (ref)])
6420 ref = reg_equiv_mem[REGNO (ref)];
6421 base = find_replacement (&XEXP (ref, 0));
6423 else
6424 /* The slot is out of range, or was dressed up in a SUBREG. */
6425 base = reg_equiv_address[REGNO (ref)];
6427 else
6428 base = find_replacement (&XEXP (ref, 0));
6430 /* Handle the case where the address is too complex to be offset by 1. */
6431 if (GET_CODE (base) == MINUS
6432 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6434 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6436 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6437 base = base_plus;
6439 else if (GET_CODE (base) == PLUS)
6441 /* The addend must be CONST_INT, or we would have dealt with it above. */
6442 HOST_WIDE_INT hi, lo;
6444 offset += INTVAL (XEXP (base, 1));
6445 base = XEXP (base, 0);
6447 /* Rework the address into a legal sequence of insns. */
6448 /* Valid range for lo is -4095 -> 4095 */
6449 lo = (offset >= 0
6450 ? (offset & 0xfff)
6451 : -((-offset) & 0xfff));
6453 /* Corner case: if lo is the max offset then we would be out of range
6454 once we have added the additional 1 below, so bump the msb into the
6455 pre-loading insn(s). */
6456 if (lo == 4095)
6457 lo &= 0x7ff;
6459 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6460 ^ (HOST_WIDE_INT) 0x80000000)
6461 - (HOST_WIDE_INT) 0x80000000);
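/* For instance (hypothetical offsets): offset = 0x2345 splits into
   lo = 0x345 and hi = 0x2000.  offset = 4095 would leave no room for
   the "offset + 1" byte access below, so lo is trimmed to 2047 and
   hi becomes 2048.  The xor/subtract pair sign-extends hi so that
   negative offsets split correctly as well.  */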
6463 if (hi + lo != offset)
6464 abort ();
6466 if (hi != 0)
6468 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6470 /* Get the base address; addsi3 knows how to handle constants
6471 that require more than one insn. */
6472 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6473 base = base_plus;
6474 offset = lo;
6478 /* Operands[2] may overlap operands[0] (though it won't overlap
6479 operands[1]), that's why we asked for a DImode reg -- so we can
6480 use the bit that does not overlap. */
6481 if (REGNO (operands[2]) == REGNO (operands[0]))
6482 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6483 else
6484 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6486 emit_insn (gen_zero_extendqisi2 (scratch,
6487 gen_rtx_MEM (QImode,
6488 plus_constant (base,
6489 offset))));
6490 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6491 gen_rtx_MEM (QImode,
6492 plus_constant (base,
6493 offset + 1))));
6494 if (!BYTES_BIG_ENDIAN)
6495 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6496 gen_rtx_IOR (SImode,
6497 gen_rtx_ASHIFT
6498 (SImode,
6499 gen_rtx_SUBREG (SImode, operands[0], 0),
6500 GEN_INT (8)),
6501 scratch)));
6502 else
6503 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6504 gen_rtx_IOR (SImode,
6505 gen_rtx_ASHIFT (SImode, scratch,
6506 GEN_INT (8)),
6507 gen_rtx_SUBREG (SImode, operands[0],
6508 0))));
6511 /* Handle storing a half-word to memory during reload by synthesizing as two
6512 byte stores. Take care not to clobber the input values until after we
6513 have moved them somewhere safe. This code assumes that if the DImode
6514 scratch in operands[2] overlaps either the input value or output address
6515 in some way, then that value must die in this insn (we absolutely need
6516 two scratch registers for some corner cases). */
6517 void
6518 arm_reload_out_hi (rtx *operands)
6520 rtx ref = operands[0];
6521 rtx outval = operands[1];
6522 rtx base, scratch;
6523 HOST_WIDE_INT offset = 0;
6525 if (GET_CODE (ref) == SUBREG)
6527 offset = SUBREG_BYTE (ref);
6528 ref = SUBREG_REG (ref);
6531 if (GET_CODE (ref) == REG)
6533 /* We have a pseudo which has been spilt onto the stack; there
6534 are two cases here: the first where there is a simple
6535 stack-slot replacement and a second where the stack-slot is
6536 out of range, or is used as a subreg. */
6537 if (reg_equiv_mem[REGNO (ref)])
6539 ref = reg_equiv_mem[REGNO (ref)];
6540 base = find_replacement (&XEXP (ref, 0));
6542 else
6543 /* The slot is out of range, or was dressed up in a SUBREG. */
6544 base = reg_equiv_address[REGNO (ref)];
6546 else
6547 base = find_replacement (&XEXP (ref, 0));
6549 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6551 /* Handle the case where the address is too complex to be offset by 1. */
6552 if (GET_CODE (base) == MINUS
6553 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6555 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6557 /* Be careful not to destroy OUTVAL. */
6558 if (reg_overlap_mentioned_p (base_plus, outval))
6560 /* Updating base_plus might destroy outval, see if we can
6561 swap the scratch and base_plus. */
6562 if (!reg_overlap_mentioned_p (scratch, outval))
6564 rtx tmp = scratch;
6565 scratch = base_plus;
6566 base_plus = tmp;
6568 else
6570 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6572 /* Be conservative and copy OUTVAL into the scratch now,
6573 this should only be necessary if outval is a subreg
6574 of something larger than a word. */
6575 /* XXX Might this clobber base? I can't see how it can,
6576 since scratch is known to overlap with OUTVAL, and
6577 must be wider than a word. */
6578 emit_insn (gen_movhi (scratch_hi, outval));
6579 outval = scratch_hi;
6583 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6584 base = base_plus;
6586 else if (GET_CODE (base) == PLUS)
6588 /* The addend must be CONST_INT, or we would have dealt with it above. */
6589 HOST_WIDE_INT hi, lo;
6591 offset += INTVAL (XEXP (base, 1));
6592 base = XEXP (base, 0);
6594 /* Rework the address into a legal sequence of insns. */
6595 /* Valid range for lo is -4095 -> 4095 */
6596 lo = (offset >= 0
6597 ? (offset & 0xfff)
6598 : -((-offset) & 0xfff));
6600 /* Corner case: if lo is the max offset then we would be out of range
6601 once we have added the additional 1 below, so bump the msb into the
6602 pre-loading insn(s). */
6603 if (lo == 4095)
6604 lo &= 0x7ff;
6606 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6607 ^ (HOST_WIDE_INT) 0x80000000)
6608 - (HOST_WIDE_INT) 0x80000000);
6610 if (hi + lo != offset)
6611 abort ();
6613 if (hi != 0)
6615 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6617 /* Be careful not to destroy OUTVAL. */
6618 if (reg_overlap_mentioned_p (base_plus, outval))
6620 /* Updating base_plus might destroy outval, see if we
6621 can swap the scratch and base_plus. */
6622 if (!reg_overlap_mentioned_p (scratch, outval))
6624 rtx tmp = scratch;
6625 scratch = base_plus;
6626 base_plus = tmp;
6628 else
6630 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6632 /* Be conservative and copy outval into scratch now,
6633 this should only be necessary if outval is a
6634 subreg of something larger than a word. */
6635 /* XXX Might this clobber base? I can't see how it
6636 can, since scratch is known to overlap with
6637 outval. */
6638 emit_insn (gen_movhi (scratch_hi, outval));
6639 outval = scratch_hi;
6643 /* Get the base address; addsi3 knows how to handle constants
6644 that require more than one insn. */
6645 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6646 base = base_plus;
6647 offset = lo;
6651 if (BYTES_BIG_ENDIAN)
6653 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6654 plus_constant (base, offset + 1)),
6655 gen_lowpart (QImode, outval)));
6656 emit_insn (gen_lshrsi3 (scratch,
6657 gen_rtx_SUBREG (SImode, outval, 0),
6658 GEN_INT (8)));
6659 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6660 gen_lowpart (QImode, scratch)));
6662 else
6664 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6665 gen_lowpart (QImode, outval)));
6666 emit_insn (gen_lshrsi3 (scratch,
6667 gen_rtx_SUBREG (SImode, outval, 0),
6668 GEN_INT (8)));
6669 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6670 plus_constant (base, offset + 1)),
6671 gen_lowpart (QImode, scratch)));
6675 /* Print a symbolic form of X to the debug file, F. */
6676 static void
6677 arm_print_value (FILE *f, rtx x)
6679 switch (GET_CODE (x))
6681 case CONST_INT:
6682 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6683 return;
6685 case CONST_DOUBLE:
6686 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6687 return;
6689 case CONST_VECTOR:
6691 int i;
6693 fprintf (f, "<");
6694 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6696 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6697 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6698 fputc (',', f);
6700 fprintf (f, ">");
6702 return;
6704 case CONST_STRING:
6705 fprintf (f, "\"%s\"", XSTR (x, 0));
6706 return;
6708 case SYMBOL_REF:
6709 fprintf (f, "`%s'", XSTR (x, 0));
6710 return;
6712 case LABEL_REF:
6713 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6714 return;
6716 case CONST:
6717 arm_print_value (f, XEXP (x, 0));
6718 return;
6720 case PLUS:
6721 arm_print_value (f, XEXP (x, 0));
6722 fprintf (f, "+");
6723 arm_print_value (f, XEXP (x, 1));
6724 return;
6726 case PC:
6727 fprintf (f, "pc");
6728 return;
6730 default:
6731 fprintf (f, "????");
6732 return;
6736 /* Routines for manipulation of the constant pool. */
6738 /* Arm instructions cannot load a large constant directly into a
6739 register; they have to come from a pc relative load. The constant
6740 must therefore be placed in the addressable range of the pc
6741 relative load. Depending on the precise pc relative load
6742 instruction the range is somewhere between 256 bytes and 4k. This
6743 means that we often have to dump a constant inside a function, and
6744 generate code to branch around it.
6746 It is important to minimize this, since the branches will slow
6747 things down and make the code larger.
6749 Normally we can hide the table after an existing unconditional
6750 branch so that there is no interruption of the flow, but in the
6751 worst case the code looks like this:
6753 ldr rn, L1
6755 b L2
6756 align
6757 L1: .long value
6761 ldr rn, L3
6763 b L4
6764 align
6765 L3: .long value
6769 We fix this by performing a scan after scheduling, which notices
6770 which instructions need to have their operands fetched from the
6771 constant table and builds the table.
6773 The algorithm starts by building a table of all the constants that
6774 need fixing up and all the natural barriers in the function (places
6775 where a constant table can be dropped without breaking the flow).
6776 For each fixup we note how far the pc-relative replacement will be
6777 able to reach and the offset of the instruction into the function.
6779 Having built the table we then group the fixes together to form
6780 tables that are as large as possible (subject to addressing
6781 constraints) and emit each table of constants after the last
6782 barrier that is within range of all the instructions in the group.
6783 If a group does not contain a barrier, then we forcibly create one
6784 by inserting a jump instruction into the flow. Once the table has
6785 been inserted, the insns are then modified to reference the
6786 relevant entry in the pool.
6788 Possible enhancements to the algorithm (not implemented) are:
6790 1) For some processors and object formats, there may be benefit in
6791 aligning the pools to the start of cache lines; this alignment
6792 would need to be taken into account when calculating addressability
6793 of a pool. */
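/* A small worked example of the grouping (illustrative addresses
   only): suppose fix A at address 0x100 can reach a pool anywhere up
   to 0x100 + 4096, while fix B at 0x180 can only reach up to
   0x180 + 256.  The group's deadline is min (0x1100, 0x280) = 0x280,
   so the pool must be dumped after the last natural barrier below
   0x280; if no such barrier exists, one is forced by inserting a
   jump around the pool.  */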
6795 /* These typedefs are located at the start of this file, so that
6796 they can be used in the prototypes there. This comment is to
6797 remind readers of that fact so that the following structures
6798 can be understood more easily.
6800 typedef struct minipool_node Mnode;
6801 typedef struct minipool_fixup Mfix; */
6803 struct minipool_node
6805 /* Doubly linked chain of entries. */
6806 Mnode * next;
6807 Mnode * prev;
6808 /* The maximum offset into the code at which this entry can be placed. While
6809 pushing fixes for forward references, all entries are sorted in order
6810 of increasing max_address. */
6811 HOST_WIDE_INT max_address;
6812 /* Similarly for an entry inserted for a backwards ref. */
6813 HOST_WIDE_INT min_address;
6814 /* The number of fixes referencing this entry. This can become zero
6815 if we "unpush" an entry. In this case we ignore the entry when we
6816 come to emit the code. */
6817 int refcount;
6818 /* The offset from the start of the minipool. */
6819 HOST_WIDE_INT offset;
6820 /* The value in the table. */
6821 rtx value;
6822 /* The mode of value. */
6823 enum machine_mode mode;
6824 /* The size of the value. With iWMMXt enabled
6825 sizes > 4 also imply an alignment of 8 bytes. */
6826 int fix_size;
6829 struct minipool_fixup
6831 Mfix * next;
6832 rtx insn;
6833 HOST_WIDE_INT address;
6834 rtx * loc;
6835 enum machine_mode mode;
6836 int fix_size;
6837 rtx value;
6838 Mnode * minipool;
6839 HOST_WIDE_INT forwards;
6840 HOST_WIDE_INT backwards;
6843 /* Fixes less than a word need padding out to a word boundary. */
6844 #define MINIPOOL_FIX_SIZE(mode) \
6845 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
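/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   are both padded up to 4, MINIPOOL_FIX_SIZE (SImode) is 4, and
   MINIPOOL_FIX_SIZE (DImode) is 8.  */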
6847 static Mnode * minipool_vector_head;
6848 static Mnode * minipool_vector_tail;
6849 static rtx minipool_vector_label;
6851 /* The linked list of all minipool fixes required for this function. */
6852 Mfix * minipool_fix_head;
6853 Mfix * minipool_fix_tail;
6854 /* The fix entry for the current minipool, once it has been placed. */
6855 Mfix * minipool_barrier;
6857 /* Determines if INSN is the start of a jump table. Returns the end
6858 of the TABLE or NULL_RTX. */
6859 static rtx
6860 is_jump_table (rtx insn)
6862 rtx table;
6864 if (GET_CODE (insn) == JUMP_INSN
6865 && JUMP_LABEL (insn) != NULL
6866 && ((table = next_real_insn (JUMP_LABEL (insn)))
6867 == next_real_insn (insn))
6868 && table != NULL
6869 && GET_CODE (table) == JUMP_INSN
6870 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6871 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6872 return table;
6874 return NULL_RTX;
6877 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6878 #define JUMP_TABLES_IN_TEXT_SECTION 0
6879 #endif
6881 static HOST_WIDE_INT
6882 get_jump_table_size (rtx insn)
6884 /* ADDR_VECs only take room if read-only data goes into the text
6885 section. */
6886 if (JUMP_TABLES_IN_TEXT_SECTION
6887 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6888 || 1
6889 #endif
6892 rtx body = PATTERN (insn);
6893 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6895 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6898 return 0;
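/* For instance, an ADDR_VEC in SImode with eight labels occupies
   8 * 4 = 32 bytes; for an ADDR_DIFF_VEC the element count is taken
   from operand 1, since operand 0 is the base label.  Either way the
   size only matters when jump tables are placed in the text
   section.  */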
6901 /* Move a minipool fix MP from its current location to before MAX_MP.
6902 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6903 constraints may need updating. */
6904 static Mnode *
6905 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6906 HOST_WIDE_INT max_address)
6908 /* This should never be true and the code below assumes these are
6909 different. */
6910 if (mp == max_mp)
6911 abort ();
6913 if (max_mp == NULL)
6915 if (max_address < mp->max_address)
6916 mp->max_address = max_address;
6918 else
6920 if (max_address > max_mp->max_address - mp->fix_size)
6921 mp->max_address = max_mp->max_address - mp->fix_size;
6922 else
6923 mp->max_address = max_address;
6925 /* Unlink MP from its current position. Since max_mp is non-null,
6926 mp->prev must be non-null. */
6927 mp->prev->next = mp->next;
6928 if (mp->next != NULL)
6929 mp->next->prev = mp->prev;
6930 else
6931 minipool_vector_tail = mp->prev;
6933 /* Re-insert it before MAX_MP. */
6934 mp->next = max_mp;
6935 mp->prev = max_mp->prev;
6936 max_mp->prev = mp;
6938 if (mp->prev != NULL)
6939 mp->prev->next = mp;
6940 else
6941 minipool_vector_head = mp;
6944 /* Save the new entry. */
6945 max_mp = mp;
6947 /* Scan over the preceding entries and adjust their addresses as
6948 required. */
6949 while (mp->prev != NULL
6950 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6952 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6953 mp = mp->prev;
6956 return max_mp;
6959 /* Add a constant to the minipool for a forward reference. Returns the
6960 node added or NULL if the constant will not fit in this pool. */
6961 static Mnode *
6962 add_minipool_forward_ref (Mfix *fix)
6964 /* If set, max_mp is the first pool_entry that has a lower
6965 constraint than the one we are trying to add. */
6966 Mnode * max_mp = NULL;
6967 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6968 Mnode * mp;
6970 /* If this fix's address is greater than the address of the first
6971 entry, then we can't put the fix in this pool. We subtract the
6972 size of the current fix to ensure that if the table is fully
6973 packed we still have enough room to insert this value by shuffling
6974 the other fixes forwards. */
6975 if (minipool_vector_head &&
6976 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6977 return NULL;
6979 /* Scan the pool to see if a constant with the same value has
6980 already been added. While we are doing this, also note the
6981 location where we must insert the constant if it doesn't already
6982 exist. */
6983 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6985 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6986 && fix->mode == mp->mode
6987 && (GET_CODE (fix->value) != CODE_LABEL
6988 || (CODE_LABEL_NUMBER (fix->value)
6989 == CODE_LABEL_NUMBER (mp->value)))
6990 && rtx_equal_p (fix->value, mp->value))
6992 /* More than one fix references this entry. */
6993 mp->refcount++;
6994 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6997 /* Note the insertion point if necessary. */
6998 if (max_mp == NULL
6999 && mp->max_address > max_address)
7000 max_mp = mp;
7002 /* If we are inserting an 8-byte aligned quantity and
7003 we have not already found an insertion point, then
7004 make sure that all such 8-byte aligned quantities are
7005 placed at the start of the pool. */
7006 if (ARM_DOUBLEWORD_ALIGN
7007 && max_mp == NULL
7008 && fix->fix_size == 8
7009 && mp->fix_size != 8)
7011 max_mp = mp;
7012 max_address = mp->max_address;
7016 /* The value is not currently in the minipool, so we need to create
7017 a new entry for it. If MAX_MP is NULL, the entry will be put on
7018 the end of the list since the placement is less constrained than
7019 any existing entry. Otherwise, we insert the new fix before
7020 MAX_MP and, if necessary, adjust the constraints on the other
7021 entries. */
7022 mp = xmalloc (sizeof (* mp));
7023 mp->fix_size = fix->fix_size;
7024 mp->mode = fix->mode;
7025 mp->value = fix->value;
7026 mp->refcount = 1;
7027 /* Not yet required for a backwards ref. */
7028 mp->min_address = -65536;
7030 if (max_mp == NULL)
7032 mp->max_address = max_address;
7033 mp->next = NULL;
7034 mp->prev = minipool_vector_tail;
7036 if (mp->prev == NULL)
7038 minipool_vector_head = mp;
7039 minipool_vector_label = gen_label_rtx ();
7041 else
7042 mp->prev->next = mp;
7044 minipool_vector_tail = mp;
7046 else
7048 if (max_address > max_mp->max_address - mp->fix_size)
7049 mp->max_address = max_mp->max_address - mp->fix_size;
7050 else
7051 mp->max_address = max_address;
7053 mp->next = max_mp;
7054 mp->prev = max_mp->prev;
7055 max_mp->prev = mp;
7056 if (mp->prev != NULL)
7057 mp->prev->next = mp;
7058 else
7059 minipool_vector_head = mp;
7062 /* Save the new entry. */
7063 max_mp = mp;
7065 /* Scan over the preceding entries and adjust their addresses as
7066 required. */
7067 while (mp->prev != NULL
7068 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7070 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7071 mp = mp->prev;
7074 return max_mp;
7077 static Mnode *
7078 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7079 HOST_WIDE_INT min_address)
7081 HOST_WIDE_INT offset;
7083 /* This should never be true, and the code below assumes these are
7084 different. */
7085 if (mp == min_mp)
7086 abort ();
7088 if (min_mp == NULL)
7090 if (min_address > mp->min_address)
7091 mp->min_address = min_address;
7093 else
7095 /* We will adjust this below if it is too loose. */
7096 mp->min_address = min_address;
7098 /* Unlink MP from its current position. Since min_mp is non-null,
7099 mp->next must be non-null. */
7100 mp->next->prev = mp->prev;
7101 if (mp->prev != NULL)
7102 mp->prev->next = mp->next;
7103 else
7104 minipool_vector_head = mp->next;
7106 /* Reinsert it after MIN_MP. */
7107 mp->prev = min_mp;
7108 mp->next = min_mp->next;
7109 min_mp->next = mp;
7110 if (mp->next != NULL)
7111 mp->next->prev = mp;
7112 else
7113 minipool_vector_tail = mp;
7116 min_mp = mp;
7118 offset = 0;
7119 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7121 mp->offset = offset;
7122 if (mp->refcount > 0)
7123 offset += mp->fix_size;
7125 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7126 mp->next->min_address = mp->min_address + mp->fix_size;
7129 return min_mp;
7132 /* Add a constant to the minipool for a backward reference. Returns the
7133 node added or NULL if the constant will not fit in this pool.
7135 Note that the code for insertion for a backwards reference can be
7136 somewhat confusing because the calculated offsets for each fix do
7137 not take into account the size of the pool (which is still under
7138 construction). */
7139 static Mnode *
7140 add_minipool_backward_ref (Mfix *fix)
7142 /* If set, min_mp is the last pool_entry that has a lower constraint
7143 than the one we are trying to add. */
7144 Mnode *min_mp = NULL;
7145 /* This can be negative, since it is only a constraint. */
7146 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7147 Mnode *mp;
7149 /* If we can't reach the current pool from this insn, or if we can't
7150 insert this entry at the end of the pool without pushing other
7151 fixes out of range, then we don't try. This ensures that we
7152 can't fail later on. */
7153 if (min_address >= minipool_barrier->address
7154 || (minipool_vector_tail->min_address + fix->fix_size
7155 >= minipool_barrier->address))
7156 return NULL;
7158 /* Scan the pool to see if a constant with the same value has
7159 already been added. While we are doing this, also note the
7160 location where we must insert the constant if it doesn't already
7161 exist. */
7162 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7164 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7165 && fix->mode == mp->mode
7166 && (GET_CODE (fix->value) != CODE_LABEL
7167 || (CODE_LABEL_NUMBER (fix->value)
7168 == CODE_LABEL_NUMBER (mp->value)))
7169 && rtx_equal_p (fix->value, mp->value)
7170 /* Check that there is enough slack to move this entry to the
7171 end of the table (this is conservative). */
7172 && (mp->max_address
7173 > (minipool_barrier->address
7174 + minipool_vector_tail->offset
7175 + minipool_vector_tail->fix_size)))
7177 mp->refcount++;
7178 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7181 if (min_mp != NULL)
7182 mp->min_address += fix->fix_size;
7183 else
7185 /* Note the insertion point if necessary. */
7186 if (mp->min_address < min_address)
7188 /* For now, we do not allow the insertion of nodes requiring
7189 8-byte alignment anywhere but at the start of the pool. */
7190 if (ARM_DOUBLEWORD_ALIGN
7191 && fix->fix_size == 8 && mp->fix_size != 8)
7192 return NULL;
7193 else
7194 min_mp = mp;
7196 else if (mp->max_address
7197 < minipool_barrier->address + mp->offset + fix->fix_size)
7199 /* Inserting before this entry would push the fix beyond
7200 its maximum address (which can happen if we have
7201 re-located a forwards fix); force the new fix to come
7202 after it. */
7203 min_mp = mp;
7204 min_address = mp->min_address + fix->fix_size;
7206 /* If we are inserting an 8-byte aligned quantity and
7207 we have not already found an insertion point, then
7208 make sure that all such 8-byte aligned quantities are
7209 placed at the start of the pool. */
7210 else if (ARM_DOUBLEWORD_ALIGN
7211 && min_mp == NULL
7212 && fix->fix_size == 8
7213 && mp->fix_size < 8)
7215 min_mp = mp;
7216 min_address = mp->min_address + fix->fix_size;
7221 /* We need to create a new entry. */
7222 mp = xmalloc (sizeof (* mp));
7223 mp->fix_size = fix->fix_size;
7224 mp->mode = fix->mode;
7225 mp->value = fix->value;
7226 mp->refcount = 1;
7227 mp->max_address = minipool_barrier->address + 65536;
7229 mp->min_address = min_address;
7231 if (min_mp == NULL)
7233 mp->prev = NULL;
7234 mp->next = minipool_vector_head;
7236 if (mp->next == NULL)
7238 minipool_vector_tail = mp;
7239 minipool_vector_label = gen_label_rtx ();
7241 else
7242 mp->next->prev = mp;
7244 minipool_vector_head = mp;
7246 else
7248 mp->next = min_mp->next;
7249 mp->prev = min_mp;
7250 min_mp->next = mp;
7252 if (mp->next != NULL)
7253 mp->next->prev = mp;
7254 else
7255 minipool_vector_tail = mp;
7258 /* Save the new entry. */
7259 min_mp = mp;
7261 if (mp->prev)
7262 mp = mp->prev;
7263 else
7264 mp->offset = 0;
7266 /* Scan over the following entries and adjust their offsets. */
7267 while (mp->next != NULL)
7269 if (mp->next->min_address < mp->min_address + mp->fix_size)
7270 mp->next->min_address = mp->min_address + mp->fix_size;
7272 if (mp->refcount)
7273 mp->next->offset = mp->offset + mp->fix_size;
7274 else
7275 mp->next->offset = mp->offset;
7277 mp = mp->next;
7280 return min_mp;
7283 static void
7284 assign_minipool_offsets (Mfix *barrier)
7286 HOST_WIDE_INT offset = 0;
7287 Mnode *mp;
7289 minipool_barrier = barrier;
7291 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7293 mp->offset = offset;
7295 if (mp->refcount > 0)
7296 offset += mp->fix_size;
7300 /* Output the literal table. */
7301 static void
7302 dump_minipool (rtx scan)
7304 Mnode * mp;
7305 Mnode * nmp;
7306 int align64 = 0;
7308 if (ARM_DOUBLEWORD_ALIGN)
7309 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7310 if (mp->refcount > 0 && mp->fix_size == 8)
7312 align64 = 1;
7313 break;
7316 if (dump_file)
7317 fprintf (dump_file,
7318 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7319 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7321 scan = emit_label_after (gen_label_rtx (), scan);
7322 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7323 scan = emit_label_after (minipool_vector_label, scan);
7325 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7327 if (mp->refcount > 0)
7329 if (dump_file)
7331 fprintf (dump_file,
7332 ";; Offset %u, min %ld, max %ld ",
7333 (unsigned) mp->offset, (unsigned long) mp->min_address,
7334 (unsigned long) mp->max_address);
7335 arm_print_value (dump_file, mp->value);
7336 fputc ('\n', dump_file);
7339 switch (mp->fix_size)
7341 #ifdef HAVE_consttable_1
7342 case 1:
7343 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7344 break;
7346 #endif
7347 #ifdef HAVE_consttable_2
7348 case 2:
7349 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7350 break;
7352 #endif
7353 #ifdef HAVE_consttable_4
7354 case 4:
7355 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7356 break;
7358 #endif
7359 #ifdef HAVE_consttable_8
7360 case 8:
7361 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7362 break;
7364 #endif
7365 default:
7366 abort ();
7367 break;
7371 nmp = mp->next;
7372 free (mp);
7375 minipool_vector_head = minipool_vector_tail = NULL;
7376 scan = emit_insn_after (gen_consttable_end (), scan);
7377 scan = emit_barrier_after (scan);
7380 /* Return the cost of forcibly inserting a barrier after INSN. */
7381 static int
7382 arm_barrier_cost (rtx insn)
7384 /* Basing the location of the pool on the loop depth is preferable,
7385 but at the moment, the basic block information seems to be
7386 corrupted by this stage of the compilation. */
7387 int base_cost = 50;
7388 rtx next = next_nonnote_insn (insn);
7390 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7391 base_cost -= 20;
7393 switch (GET_CODE (insn))
7395 case CODE_LABEL:
7396 /* It will always be better to place the table before the label, rather
7397 than after it. */
7398 return 50;
7400 case INSN:
7401 case CALL_INSN:
7402 return base_cost;
7404 case JUMP_INSN:
7405 return base_cost - 10;
7407 default:
7408 return base_cost + 10;
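/* To illustrate the resulting ordering: an ordinary insn or call
   costs 50, or 30 when the following insn is a label; a jump costs 10
   less, so a jump immediately before a label scores 20, the cheapest
   place to force a barrier; a label itself always scores 50, since
   the pool should go before it rather than after.  The caller below
   keeps the last position seen with the lowest cost.  */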
7412 /* Find the best place in the insn stream in the range
7413 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7414 Create the barrier by inserting a jump and add a new fix entry for
7415 it. */
7416 static Mfix *
7417 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7419 HOST_WIDE_INT count = 0;
7420 rtx barrier;
7421 rtx from = fix->insn;
7422 rtx selected = from;
7423 int selected_cost;
7424 HOST_WIDE_INT selected_address;
7425 Mfix * new_fix;
7426 HOST_WIDE_INT max_count = max_address - fix->address;
7427 rtx label = gen_label_rtx ();
7429 selected_cost = arm_barrier_cost (from);
7430 selected_address = fix->address;
7432 while (from && count < max_count)
7434 rtx tmp;
7435 int new_cost;
7437 /* This code shouldn't have been called if there was a natural barrier
7438 within range. */
7439 if (GET_CODE (from) == BARRIER)
7440 abort ();
7442 /* Count the length of this insn. */
7443 count += get_attr_length (from);
7445 /* If there is a jump table, add its length. */
7446 tmp = is_jump_table (from);
7447 if (tmp != NULL)
7449 count += get_jump_table_size (tmp);
7451 /* Jump tables aren't in a basic block, so base the cost on
7452 the dispatch insn. If we select this location, we will
7453 still put the pool after the table. */
7454 new_cost = arm_barrier_cost (from);
7456 if (count < max_count && new_cost <= selected_cost)
7458 selected = tmp;
7459 selected_cost = new_cost;
7460 selected_address = fix->address + count;
7463 /* Continue after the dispatch table. */
7464 from = NEXT_INSN (tmp);
7465 continue;
7468 new_cost = arm_barrier_cost (from);
7470 if (count < max_count && new_cost <= selected_cost)
7472 selected = from;
7473 selected_cost = new_cost;
7474 selected_address = fix->address + count;
7477 from = NEXT_INSN (from);
7480 /* Create a new JUMP_INSN that branches around a barrier. */
7481 from = emit_jump_insn_after (gen_jump (label), selected);
7482 JUMP_LABEL (from) = label;
7483 barrier = emit_barrier_after (from);
7484 emit_label_after (label, barrier);
7486 /* Create a minipool barrier entry for the new barrier. */
7487 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7488 new_fix->insn = barrier;
7489 new_fix->address = selected_address;
7490 new_fix->next = fix->next;
7491 fix->next = new_fix;
7493 return new_fix;
7496 /* Record that there is a natural barrier in the insn stream at
7497 ADDRESS. */
7498 static void
7499 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7501 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7503 fix->insn = insn;
7504 fix->address = address;
7506 fix->next = NULL;
7507 if (minipool_fix_head != NULL)
7508 minipool_fix_tail->next = fix;
7509 else
7510 minipool_fix_head = fix;
7512 minipool_fix_tail = fix;
7515 /* Record INSN, which will need fixing up to load a value from the
7516 minipool. ADDRESS is the offset of the insn since the start of the
7517 function; LOC is a pointer to the part of the insn which requires
7518 fixing; VALUE is the constant that must be loaded, which is of type
7519 MODE. */
7520 static void
7521 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7522 enum machine_mode mode, rtx value)
7524 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7526 #ifdef AOF_ASSEMBLER
7527 /* PIC symbol references need to be converted into offsets into the
7528 based area. */
7529 /* XXX This shouldn't be done here. */
7530 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7531 value = aof_pic_entry (value);
7532 #endif /* AOF_ASSEMBLER */
7534 fix->insn = insn;
7535 fix->address = address;
7536 fix->loc = loc;
7537 fix->mode = mode;
7538 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7539 fix->value = value;
7540 fix->forwards = get_attr_pool_range (insn);
7541 fix->backwards = get_attr_neg_pool_range (insn);
7542 fix->minipool = NULL;
7544 /* If an insn doesn't have a range defined for it, then it isn't
7545 expecting to be reworked by this code. Better to abort now than
7546 to generate duff assembly code. */
7547 if (fix->forwards == 0 && fix->backwards == 0)
7548 abort ();
7550 /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7551 So there might be an empty word before the start of the pool.
7552 Hence we reduce the forward range by 4 to allow for this
7553 possibility. */
7554 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
7555 fix->forwards -= 4;
7557 if (dump_file)
7559 fprintf (dump_file,
7560 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7561 GET_MODE_NAME (mode),
7562 INSN_UID (insn), (unsigned long) address,
7563 -1 * (long)fix->backwards, (long)fix->forwards);
7564 arm_print_value (dump_file, fix->value);
7565 fprintf (dump_file, "\n");
7568 /* Add it to the chain of fixes. */
7569 fix->next = NULL;
7571 if (minipool_fix_head != NULL)
7572 minipool_fix_tail->next = fix;
7573 else
7574 minipool_fix_head = fix;
7576 minipool_fix_tail = fix;
7579 /* Scan INSN and note any of its operands that need fixing.
7580 If DO_PUSHES is false we do not actually push any of the fixups
7581 needed. The function returns TRUE if any fixups were needed/pushed.
7582 This is used by arm_memory_load_p() which needs to know about loads
7583 of constants that will be converted into minipool loads. */
7584 static bool
7585 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7587 bool result = false;
7588 int opno;
7590 extract_insn (insn);
7592 if (!constrain_operands (1))
7593 fatal_insn_not_found (insn);
7595 if (recog_data.n_alternatives == 0)
7596 return false;
7598 /* Fill in recog_op_alt with information about the constraints of this insn. */
7599 preprocess_constraints ();
7601 for (opno = 0; opno < recog_data.n_operands; opno++)
7603 /* Things we need to fix can only occur in inputs. */
7604 if (recog_data.operand_type[opno] != OP_IN)
7605 continue;
7607 /* If this alternative is a memory reference, then any mention
7608 of constants in this alternative is really to fool reload
7609 into allowing us to accept one there. We need to fix them up
7610 now so that we output the right code. */
7611 if (recog_op_alt[opno][which_alternative].memory_ok)
7613 rtx op = recog_data.operand[opno];
7615 if (CONSTANT_P (op))
7617 if (do_pushes)
7618 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7619 recog_data.operand_mode[opno], op);
7620 result = true;
7622 else if (GET_CODE (op) == MEM
7623 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7624 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7626 if (do_pushes)
7628 rtx cop = avoid_constant_pool_reference (op);
7630 /* Casting the address of something to a mode narrower
7631 than a word can cause avoid_constant_pool_reference()
7632 to return the pool reference itself. That's no good to
7633 us here. Let's just hope that we can use the
7634 constant pool value directly. */
7635 if (op == cop)
7636 cop = get_pool_constant (XEXP (op, 0));
7638 push_minipool_fix (insn, address,
7639 recog_data.operand_loc[opno],
7640 recog_data.operand_mode[opno], cop);
7643 result = true;
7648 return result;
7651 /* GCC puts the pool in the wrong place for ARM, since we can only
7652 load addresses a limited distance around the pc. We do some
7653 special munging to move the constant pool values to the correct
7654 point in the code. */
7655 static void
7656 arm_reorg (void)
7658 rtx insn;
7659 HOST_WIDE_INT address = 0;
7660 Mfix * fix;
7662 minipool_fix_head = minipool_fix_tail = NULL;
7664 /* The first insn must always be a note, or the code below won't
7665 scan it properly. */
7666 insn = get_insns ();
7667 if (GET_CODE (insn) != NOTE)
7668 abort ();
7670 /* Scan all the insns and record the operands that will need fixing. */
7671 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7673 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7674 && (arm_cirrus_insn_p (insn)
7675 || GET_CODE (insn) == JUMP_INSN
7676 || arm_memory_load_p (insn)))
7677 cirrus_reorg (insn);
7679 if (GET_CODE (insn) == BARRIER)
7680 push_minipool_barrier (insn, address);
7681 else if (INSN_P (insn))
7683 rtx table;
7685 note_invalid_constants (insn, address, true);
7686 address += get_attr_length (insn);
7688 /* If the insn is a vector jump, add the size of the table
7689 and skip the table. */
7690 if ((table = is_jump_table (insn)) != NULL)
7692 address += get_jump_table_size (table);
7693 insn = table;
7698 fix = minipool_fix_head;
7700 /* Now scan the fixups and perform the required changes. */
7701 while (fix)
7703 Mfix * ftmp;
7704 Mfix * fdel;
7705 Mfix * last_added_fix;
7706 Mfix * last_barrier = NULL;
7707 Mfix * this_fix;
7709 /* Skip any further barriers before the next fix. */
7710 while (fix && GET_CODE (fix->insn) == BARRIER)
7711 fix = fix->next;
7713 /* No more fixes. */
7714 if (fix == NULL)
7715 break;
7717 last_added_fix = NULL;
7719 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7721 if (GET_CODE (ftmp->insn) == BARRIER)
7723 if (ftmp->address >= minipool_vector_head->max_address)
7724 break;
7726 last_barrier = ftmp;
7728 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7729 break;
7731 last_added_fix = ftmp; /* Keep track of the last fix added. */
7734 /* If we found a barrier, drop back to that; any fixes that we
7735 could have reached but come after the barrier will now go in
7736 the next mini-pool. */
7737 if (last_barrier != NULL)
7739 /* Reduce the refcount for those fixes that won't go into this
7740 pool after all. */
7741 for (fdel = last_barrier->next;
7742 fdel && fdel != ftmp;
7743 fdel = fdel->next)
7745 fdel->minipool->refcount--;
7746 fdel->minipool = NULL;
7749 ftmp = last_barrier;
7751 else
7753 /* ftmp is the first fix that we can't fit into this pool and
7754 there are no natural barriers that we could use. Insert a
7755 new barrier in the code somewhere between the previous
7756 fix and this one, and arrange to jump around it. */
7757 HOST_WIDE_INT max_address;
7759 /* The last item on the list of fixes must be a barrier, so
7760 we can never run off the end of the list of fixes without
7761 last_barrier being set. */
7762 if (ftmp == NULL)
7763 abort ();
7765 max_address = minipool_vector_head->max_address;
7766 /* Check that there isn't another fix that is in range that
7767 we couldn't fit into this pool because the pool was
7768 already too large: we need to put the pool before such an
7769 instruction. */
7770 if (ftmp->address < max_address)
7771 max_address = ftmp->address;
7773 last_barrier = create_fix_barrier (last_added_fix, max_address);
7776 assign_minipool_offsets (last_barrier);
7778 while (ftmp)
7780 if (GET_CODE (ftmp->insn) != BARRIER
7781 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7782 == NULL))
7783 break;
7785 ftmp = ftmp->next;
7788 /* Scan over the fixes we have identified for this pool, fixing them
7789 up and adding the constants to the pool itself. */
7790 for (this_fix = fix; this_fix && ftmp != this_fix;
7791 this_fix = this_fix->next)
7792 if (GET_CODE (this_fix->insn) != BARRIER)
7794 rtx addr
7795 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7796 minipool_vector_label),
7797 this_fix->minipool->offset);
7798 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7801 dump_minipool (last_barrier->insn);
7802 fix = ftmp;
7805 /* From now on we must synthesize any constants that we can't handle
7806 directly. This can happen if the RTL gets split during final
7807 instruction generation. */
7808 after_arm_reorg = 1;
7810 /* Free the minipool memory. */
7811 obstack_free (&minipool_obstack, minipool_startobj);
7814 /* Routines to output assembly language. */
7816 /* If the rtx is a valid FPA immediate constant, return the string of the number.
7817 In this way we can ensure that valid double constants are generated even
7818 when cross compiling. */
7819 const char *
7820 fp_immediate_constant (rtx x)
7822 REAL_VALUE_TYPE r;
7823 int i;
7825 if (!fp_consts_inited)
7826 init_fp_table ();
7828 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7829 for (i = 0; i < 8; i++)
7830 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7831 return strings_fp[i];
7833 abort ();
7836 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7837 static const char *
7838 fp_const_from_val (REAL_VALUE_TYPE *r)
7840 int i;
7842 if (!fp_consts_inited)
7843 init_fp_table ();
7845 for (i = 0; i < 8; i++)
7846 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7847 return strings_fp[i];
7849 abort ();
7852 /* Output the operands of a LDM/STM instruction to STREAM.
7853 MASK is the ARM register set mask of which only bits 0-15 are important.
7854 REG is the base register, either the frame pointer or the stack pointer;
7855 INSTR is the possibly suffixed load or store instruction. */
7856 static void
7857 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7859 int i;
7860 int not_first = FALSE;
7862 fputc ('\t', stream);
7863 asm_fprintf (stream, instr, reg);
7864 fputs (", {", stream);
7866 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7867 if (mask & (1 << i))
7869 if (not_first)
7870 fprintf (stream, ", ");
7872 asm_fprintf (stream, "%r", i);
7873 not_first = TRUE;
7876 fprintf (stream, "}");
7878 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7879 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7880 Strictly speaking the instruction would be unpredictable only if
7881 we were writing back the base register as well, but since we never
7882 want to generate an LDM type 2 instruction (register bank switching)
7883 which is what you get if the PC is not being loaded, we do not need
7884 to check for writeback. */
7885 if (! TARGET_APCS_32
7886 && ((mask & (1 << PC_REGNUM)) != 0))
7887 fprintf (stream, "^");
7889 fprintf (stream, "\n");
7893 /* Output a FLDMX instruction to STREAM.
7894 BASE is the register containing the address.
7895 REG and COUNT specify the register range.
7896 Extra registers may be added to avoid hardware bugs. */
7898 static void
7899 arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
7901 int i;
7903 /* Work around the ARM10 VFPr1 bug. */
7904 if (count == 2 && !arm_arch6)
7906 if (reg == 15)
7907 reg--;
7908 count++;
7911 fputc ('\t', stream);
7912 asm_fprintf (stream, "fldmfdx\t%r!, {", base);
7914 for (i = reg; i < reg + count; i++)
7916 if (i > reg)
7917 fputs (", ", stream);
7918 asm_fprintf (stream, "d%d", i);
7920 fputs ("}\n", stream);
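/* For example (hypothetical arguments), base = SP_REGNUM, reg = 8 and
   count = 2 on a pre-ARMv6 core would print
       fldmfdx sp!, {d8, d9, d10}
   where the third register is the extra pair loaded to dodge the
   ARM10 VFPr1 erratum handled above.  */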
7925 /* Output the assembly for a store multiple. */
7927 const char *
7928 vfp_output_fstmx (rtx * operands)
7930 char pattern[100];
7931 int p;
7932 int base;
7933 int i;
7935 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7936 p = strlen (pattern);
7938 if (GET_CODE (operands[1]) != REG)
7939 abort ();
7941 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7942 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7944 p += sprintf (&pattern[p], ", d%d", base + i);
7946 strcpy (&pattern[p], "}");
7948 output_asm_insn (pattern, operands);
7949 return "";
7953 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
7954 number of bytes pushed. */
7956 static int
7957 vfp_emit_fstmx (int base_reg, int count)
7959 rtx par;
7960 rtx dwarf;
7961 rtx tmp, reg;
7962 int i;
7964 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
7965 register pairs are stored by a store multiple insn. We avoid this
7966 by pushing an extra pair. */
7967 if (count == 2 && !arm_arch6)
7969 if (base_reg == LAST_VFP_REGNUM - 3)
7970 base_reg -= 2;
7971 count++;
7974 /* ??? The frame layout is implementation defined. We describe
7975 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7976 We really need some way of representing the whole block so that the
7977 unwinder can figure it out at runtime. */
7978 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7979 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7981 reg = gen_rtx_REG (DFmode, base_reg);
7982 base_reg += 2;
7984 XVECEXP (par, 0, 0)
7985 = gen_rtx_SET (VOIDmode,
7986 gen_rtx_MEM (BLKmode,
7987 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7988 gen_rtx_UNSPEC (BLKmode,
7989 gen_rtvec (1, reg),
7990 UNSPEC_PUSH_MULT));
7992 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7993 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7994 GEN_INT (-(count * 8 + 4))));
7995 RTX_FRAME_RELATED_P (tmp) = 1;
7996 XVECEXP (dwarf, 0, 0) = tmp;
7998 tmp = gen_rtx_SET (VOIDmode,
7999 gen_rtx_MEM (DFmode, stack_pointer_rtx),
8000 reg);
8001 RTX_FRAME_RELATED_P (tmp) = 1;
8002 XVECEXP (dwarf, 0, 1) = tmp;
8004 for (i = 1; i < count; i++)
8006 reg = gen_rtx_REG (DFmode, base_reg);
8007 base_reg += 2;
8008 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8010 tmp = gen_rtx_SET (VOIDmode,
8011 gen_rtx_MEM (DFmode,
8012 gen_rtx_PLUS (SImode,
8013 stack_pointer_rtx,
8014 GEN_INT (i * 8))),
8015 reg);
8016 RTX_FRAME_RELATED_P (tmp) = 1;
8017 XVECEXP (dwarf, 0, i + 1) = tmp;
8020 par = emit_insn (par);
8021 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8022 REG_NOTES (par));
8023 RTX_FRAME_RELATED_P (par) = 1;
8025 return count * 8 + 4;
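/* For instance, count = 3 pushes three register pairs (24 bytes of
   data) and returns 28: the extra 4 bytes are the unused FSTMX pad
   word described above.  On pre-ARMv6 cores a count of 2 is bumped to
   3 first, so the return value reflects the extra pair as well.  */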
8029 /* Output a 'call' insn. */
8030 const char *
8031 output_call (rtx *operands)
8033 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8035 if (REGNO (operands[0]) == LR_REGNUM)
8037 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8038 output_asm_insn ("mov%?\t%0, %|lr", operands);
8041 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8043 if (TARGET_INTERWORK)
8044 output_asm_insn ("bx%?\t%0", operands);
8045 else
8046 output_asm_insn ("mov%?\t%|pc, %0", operands);
8048 return "";
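/* The emitted sequence looks like (for a call through r2, say):
       mov lr, pc
       mov pc, r2
   or, with interworking enabled:
       mov lr, pc
       bx  r2
   A call through lr first copies lr into ip, since lr is about to be
   overwritten with the return address anyway.  */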
8051 /* Output a 'call' insn that is a reference in memory. */
8052 const char *
8053 output_call_mem (rtx *operands)
8055 if (TARGET_INTERWORK)
8057 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8058 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8059 output_asm_insn ("bx%?\t%|ip", operands);
8061 else if (regno_use_in (LR_REGNUM, operands[0]))
8063 /* LR is used in the memory address. We load the address in the
8064 first instruction. It's safe to use IP as the target of the
8065 load since the call will kill it anyway. */
8066 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8067 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8068 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8070 else
8072 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8073 output_asm_insn ("ldr%?\t%|pc, %0", operands);
8076 return "";
8080 /* Output a move from arm registers to an fpa register.
8081 OPERANDS[0] is an fpa register.
8082 OPERANDS[1] is the first register of an arm register pair. */
8083 const char *
8084 output_mov_long_double_fpa_from_arm (rtx *operands)
8086 int arm_reg0 = REGNO (operands[1]);
8087 rtx ops[3];
8089 if (arm_reg0 == IP_REGNUM)
8090 abort ();
8092 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8093 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8094 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8096 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
8097 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
8099 return "";
8102 /* Output a move from an fpa register to arm registers.
8103 OPERANDS[0] is the first register of an arm register pair.
8104 OPERANDS[1] is an fpa register. */
8105 const char *
8106 output_mov_long_double_arm_from_fpa (rtx *operands)
8108 int arm_reg0 = REGNO (operands[0]);
8109 rtx ops[3];
8111 if (arm_reg0 == IP_REGNUM)
8112 abort ();
8114 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8115 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8116 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8118 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
8119 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
8120 return "";
8123 /* Output a move from arm registers to arm registers of a long double.
8124 OPERANDS[0] is the destination.
8125 OPERANDS[1] is the source. */
8126 const char *
8127 output_mov_long_double_arm_from_arm (rtx *operands)
8129 /* We have to be careful here because the two might overlap. */
8130 int dest_start = REGNO (operands[0]);
8131 int src_start = REGNO (operands[1]);
8132 rtx ops[2];
8133 int i;
8135 if (dest_start < src_start)
8137 for (i = 0; i < 3; i++)
8139 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8140 ops[1] = gen_rtx_REG (SImode, src_start + i);
8141 output_asm_insn ("mov%?\t%0, %1", ops);
8144 else
8146 for (i = 2; i >= 0; i--)
8148 ops[0] = gen_rtx_REG (SImode, dest_start + i);
8149 ops[1] = gen_rtx_REG (SImode, src_start + i);
8150 output_asm_insn ("mov%?\t%0, %1", ops);
8154 return "";
8158 /* Output a move from arm registers to an fpa register.
8159 OPERANDS[0] is an fpa register.
8160 OPERANDS[1] is the first register of an arm register pair. */
8161 const char *
8162 output_mov_double_fpa_from_arm (rtx *operands)
8164 int arm_reg0 = REGNO (operands[1]);
8165 rtx ops[2];
8167 if (arm_reg0 == IP_REGNUM)
8168 abort ();
8170 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8171 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8172 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8173 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8174 return "";
8177 /* Output a move from an fpa register to arm registers.
8178 OPERANDS[0] is the first register of an arm register pair.
8179 OPERANDS[1] is an fpa register. */
8180 const char *
8181 output_mov_double_arm_from_fpa (rtx *operands)
8183 int arm_reg0 = REGNO (operands[0]);
8184 rtx ops[2];
8186 if (arm_reg0 == IP_REGNUM)
8187 abort ();
8189 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8190 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8191 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8192 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8193 return "";
8196 /* Output a move between double words.
8197 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8198 or MEM<-REG and all MEMs must be offsettable addresses. */
8199 const char *
8200 output_move_double (rtx *operands)
8202 enum rtx_code code0 = GET_CODE (operands[0]);
8203 enum rtx_code code1 = GET_CODE (operands[1]);
8204 rtx otherops[3];
8206 if (code0 == REG)
8208 int reg0 = REGNO (operands[0]);
8210 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8212 if (code1 == REG)
8214 int reg1 = REGNO (operands[1]);
8215 if (reg1 == IP_REGNUM)
8216 abort ();
8218 /* Ensure the second source is not overwritten. */
8219 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8220 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8221 else
8222 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8224 else if (code1 == CONST_VECTOR)
8226 HOST_WIDE_INT hint = 0;
8228 switch (GET_MODE (operands[1]))
8230 case V2SImode:
8231 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8232 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8233 break;
8235 case V4HImode:
8236 if (BYTES_BIG_ENDIAN)
8238 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8239 hint <<= 16;
8240 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8242 else
8244 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8245 hint <<= 16;
8246 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8249 otherops[1] = GEN_INT (hint);
8250 hint = 0;
8252 if (BYTES_BIG_ENDIAN)
8254 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8255 hint <<= 16;
8256 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8258 else
8260 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8261 hint <<= 16;
8262 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8265 operands[1] = GEN_INT (hint);
8266 break;
8268 case V8QImode:
8269 if (BYTES_BIG_ENDIAN)
8271 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8272 hint <<= 8;
8273 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8274 hint <<= 8;
8275 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8276 hint <<= 8;
8277 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8279 else
8281 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8282 hint <<= 8;
8283 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8284 hint <<= 8;
8285 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8286 hint <<= 8;
8287 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8290 otherops[1] = GEN_INT (hint);
8291 hint = 0;
8293 if (BYTES_BIG_ENDIAN)
8295 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8296 hint <<= 8;
8297 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8298 hint <<= 8;
8299 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8300 hint <<= 8;
8301 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8303 else
8305 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8306 hint <<= 8;
8307 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8308 hint <<= 8;
8309 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8310 hint <<= 8;
8311 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8314 operands[1] = GEN_INT (hint);
8315 break;
8317 default:
8318 abort ();
8320 output_mov_immediate (operands);
8321 output_mov_immediate (otherops);
8323 else if (code1 == CONST_DOUBLE)
8325 if (GET_MODE (operands[1]) == DFmode)
8327 REAL_VALUE_TYPE r;
8328 long l[2];
8330 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8331 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8332 otherops[1] = GEN_INT (l[1]);
8333 operands[1] = GEN_INT (l[0]);
8335 else if (GET_MODE (operands[1]) != VOIDmode)
8336 abort ();
8337 else if (WORDS_BIG_ENDIAN)
8339 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8340 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8342 else
8344 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8345 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8348 output_mov_immediate (operands);
8349 output_mov_immediate (otherops);
8351 else if (code1 == CONST_INT)
8353 #if HOST_BITS_PER_WIDE_INT > 32
8354 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8355 what the upper word is. */
8356 if (WORDS_BIG_ENDIAN)
8358 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8359 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8361 else
8363 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8364 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8366 #else
8367 /* Sign extend the intval into the high-order word. */
8368 if (WORDS_BIG_ENDIAN)
8370 otherops[1] = operands[1];
8371 operands[1] = (INTVAL (operands[1]) < 0
8372 ? constm1_rtx : const0_rtx);
8374 else
8375 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8376 #endif
8377 output_mov_immediate (otherops);
8378 output_mov_immediate (operands);
8380 else if (code1 == MEM)
8382 switch (GET_CODE (XEXP (operands[1], 0)))
8384 case REG:
8385 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8386 break;
8388 case PRE_INC:
8389 abort (); /* Should never happen now. */
8390 break;
8392 case PRE_DEC:
8393 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8394 break;
8396 case POST_INC:
8397 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8398 break;
8400 case POST_DEC:
8401 abort (); /* Should never happen now. */
8402 break;
8404 case LABEL_REF:
8405 case CONST:
8406 output_asm_insn ("adr%?\t%0, %1", operands);
8407 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8408 break;
8410 default:
8411 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8412 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8414 otherops[0] = operands[0];
8415 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8416 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8418 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8420 if (GET_CODE (otherops[2]) == CONST_INT)
8422 switch ((int) INTVAL (otherops[2]))
8424 case -8:
8425 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8426 return "";
8427 case -4:
8428 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8429 return "";
8430 case 4:
8431 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8432 return "";
8435 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8436 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8437 else
8438 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8440 else
8441 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8443 else
8444 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8446 return "ldm%?ia\t%0, %M0";
8448 else
8450 otherops[1] = adjust_address (operands[1], SImode, 4);
8451 /* Take care of overlapping base/data reg. */
8452 if (reg_mentioned_p (operands[0], operands[1]))
8454 output_asm_insn ("ldr%?\t%0, %1", otherops);
8455 output_asm_insn ("ldr%?\t%0, %1", operands);
8457 else
8459 output_asm_insn ("ldr%?\t%0, %1", operands);
8460 output_asm_insn ("ldr%?\t%0, %1", otherops);
8465 else
8466 abort (); /* Constraints should prevent this. */
8468 else if (code0 == MEM && code1 == REG)
8470 if (REGNO (operands[1]) == IP_REGNUM)
8471 abort ();
8473 switch (GET_CODE (XEXP (operands[0], 0)))
8475 case REG:
8476 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8477 break;
8479 case PRE_INC:
8480 abort (); /* Should never happen now. */
8481 break;
8483 case PRE_DEC:
8484 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8485 break;
8487 case POST_INC:
8488 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8489 break;
8491 case POST_DEC:
8492 abort (); /* Should never happen now. */
8493 break;
8495 case PLUS:
8496 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
8498 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8500 case -8:
8501 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8502 return "";
8504 case -4:
8505 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8506 return "";
8508 case 4:
8509 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8510 return "";
8513 /* Fall through */
8515 default:
8516 otherops[0] = adjust_address (operands[0], SImode, 4);
8517 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8518 output_asm_insn ("str%?\t%1, %0", operands);
8519 output_asm_insn ("str%?\t%1, %0", otherops);
8522 else
8523 /* Constraints should prevent this. */
8524 abort ();
8526 return "";
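/* For illustration (hypothetical operands): loading a DImode value into
   the pair r0/r1 from the address (plus r2 4) hits the "case 4:" path
   above and emits the single instruction

	ldmib	r2, {r0, r1}

   instead of an add followed by an ldmia, since ldmib begins the
   transfer at base + 4.  */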
8530 /* Output an arbitrary MOV reg, #n.
8531 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8532 const char *
8533 output_mov_immediate (rtx *operands)
8535 HOST_WIDE_INT n = INTVAL (operands[1]);
8537 /* Try to use one MOV. */
8538 if (const_ok_for_arm (n))
8539 output_asm_insn ("mov%?\t%0, %1", operands);
8541 /* Try to use one MVN. */
8542 else if (const_ok_for_arm (~n))
8544 operands[1] = GEN_INT (~n);
8545 output_asm_insn ("mvn%?\t%0, %1", operands);
8547 else
8549 int n_ones = 0;
8550 int i;
8552 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8553 for (i = 0; i < 32; i++)
8554 if (n & 1 << i)
8555 n_ones++;
8557 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8558 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8559 else
8560 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8563 return "";
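/* Worked example (hypothetical operands): r0 = 0x12345678 has 13 set
   bits, so the MOV/ORR path is chosen and the constant is split into
   byte-sized chunks at even bit positions (see output_multi_immediate
   below):

	mov	r0, #632	@ 0x00000278
	orr	r0, r0, #21504	@ 0x00005400
	orr	r0, r0, #36962304	@ 0x02340000
	orr	r0, r0, #268435456	@ 0x10000000

   Each chunk is an 8-bit value at an even rotation, so each one
   satisfies const_ok_for_arm.  */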
8566 /* Output an ADD r, s, #n where n may be too big for one instruction.
8567 If adding zero to one register, output nothing. */
8568 const char *
8569 output_add_immediate (rtx *operands)
8571 HOST_WIDE_INT n = INTVAL (operands[2]);
8573 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8575 if (n < 0)
8576 output_multi_immediate (operands,
8577 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8578 -n);
8579 else
8580 output_multi_immediate (operands,
8581 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8585 return "";
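/* For illustration (hypothetical operands): adding #0x10004 cannot be
   encoded in one ARM immediate, so the call above expands to

	add	r0, r1, #4
	add	r0, r0, #65536

   while adding #0 with identical source and destination registers
   emits nothing at all.  */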
8588 /* Output a multiple immediate operation.
8589 OPERANDS is the vector of operands referred to in the output patterns.
8590 INSTR1 is the output pattern to use for the first constant.
8591 INSTR2 is the output pattern to use for subsequent constants.
8592 IMMED_OP is the index of the constant slot in OPERANDS.
8593 N is the constant value. */
8594 static const char *
8595 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8596 int immed_op, HOST_WIDE_INT n)
8598 #if HOST_BITS_PER_WIDE_INT > 32
8599 n &= 0xffffffff;
8600 #endif
8602 if (n == 0)
8604 /* Quick and easy output. */
8605 operands[immed_op] = const0_rtx;
8606 output_asm_insn (instr1, operands);
8608 else
8610 int i;
8611 const char * instr = instr1;
8613 /* Note that n is never zero here (which would give no output). */
8614 for (i = 0; i < 32; i += 2)
8616 if (n & (3 << i))
8618 operands[immed_op] = GEN_INT (n & (255 << i));
8619 output_asm_insn (instr, operands);
8620 instr = instr2;
8621 i += 6;
8626 return "";
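/* Design note: the loop above scans bit pairs and always starts a chunk
   on an even bit position, advancing by eight bits once a chunk has
   been emitted.  This mirrors the ARM data-processing immediate
   encoding -- an 8-bit value rotated right by an even amount -- so
   every chunk produced here is encodable in a single instruction.  */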
8629 /* Return the appropriate ARM instruction for the operation code.
8630 The returned result should not be overwritten. OP is the rtx of the
8631 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8632 was shifted. */
8633 const char *
8634 arithmetic_instr (rtx op, int shift_first_arg)
8636 switch (GET_CODE (op))
8638 case PLUS:
8639 return "add";
8641 case MINUS:
8642 return shift_first_arg ? "rsb" : "sub";
8644 case IOR:
8645 return "orr";
8647 case XOR:
8648 return "eor";
8650 case AND:
8651 return "and";
8653 default:
8654 abort ();
8658 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8659 for the operation code. The returned result should not be overwritten.
8660 OP is the rtx code of the shift.
8661 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8662 constant shift amount otherwise. */
8663 static const char *
8664 shift_op (rtx op, HOST_WIDE_INT *amountp)
8666 const char * mnem;
8667 enum rtx_code code = GET_CODE (op);
8669 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8670 *amountp = -1;
8671 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8672 *amountp = INTVAL (XEXP (op, 1));
8673 else
8674 abort ();
8676 switch (code)
8678 case ASHIFT:
8679 mnem = "asl";
8680 break;
8682 case ASHIFTRT:
8683 mnem = "asr";
8684 break;
8686 case LSHIFTRT:
8687 mnem = "lsr";
8688 break;
8690 case ROTATERT:
8691 mnem = "ror";
8692 break;
8694 case MULT:
8695 /* We never have to worry about the amount being other than a
8696 power of 2, since this case can never be reloaded from a reg. */
8697 if (*amountp != -1)
8698 *amountp = int_log2 (*amountp);
8699 else
8700 abort ();
8701 return "asl";
8703 default:
8704 abort ();
8707 if (*amountp != -1)
8709 /* This is not 100% correct, but follows from the desire to merge
8710 multiplication by a power of 2 with the recognizer for a
8711 shift. >=32 is not a valid shift for "asl", so we must try and
8712 output a shift that produces the correct arithmetical result.
8713 Using lsr #32 is identical except for the fact that the carry bit
8714 is not set correctly if we set the flags; but we never use the
8715 carry bit from such an operation, so we can ignore that. */
8716 if (code == ROTATERT)
8717 /* Rotate is just modulo 32. */
8718 *amountp &= 31;
8719 else if (*amountp != (*amountp & 31))
8721 if (code == ASHIFT)
8722 mnem = "lsr";
8723 *amountp = 32;
8726 /* Shifts of 0 are no-ops. */
8727 if (*amountp == 0)
8728 return NULL;
8731 return mnem;
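/* For illustration: a constant left shift of 33 is emitted as "lsr #32",
   since "asl" cannot encode counts of 32 or more and lsr #32 gives the
   same all-zero arithmetic result; a rotate by 33 simply becomes
   "ror #1", rotates being modulo 32.  */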
8734 /* Obtain the shift count from POWER, which must be a power of two. */
8736 static HOST_WIDE_INT
8737 int_log2 (HOST_WIDE_INT power)
8739 HOST_WIDE_INT shift = 0;
8741 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8743 if (shift > 31)
8744 abort ();
8745 shift++;
8748 return shift;
8751 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8752 /bin/as is horribly restrictive. */
8753 #define MAX_ASCII_LEN 51
8755 void
8756 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8758 int i;
8759 int len_so_far = 0;
8761 fputs ("\t.ascii\t\"", stream);
8763 for (i = 0; i < len; i++)
8765 int c = p[i];
8767 if (len_so_far >= MAX_ASCII_LEN)
8769 fputs ("\"\n\t.ascii\t\"", stream);
8770 len_so_far = 0;
8773 switch (c)
8775 case TARGET_TAB:
8776 fputs ("\\t", stream);
8777 len_so_far += 2;
8778 break;
8780 case TARGET_FF:
8781 fputs ("\\f", stream);
8782 len_so_far += 2;
8783 break;
8785 case TARGET_BS:
8786 fputs ("\\b", stream);
8787 len_so_far += 2;
8788 break;
8790 case TARGET_CR:
8791 fputs ("\\r", stream);
8792 len_so_far += 2;
8793 break;
8795 case TARGET_NEWLINE:
8796 fputs ("\\n", stream);
8797 c = p [i + 1];
8798 if ((c >= ' ' && c <= '~')
8799 || c == TARGET_TAB)
8800 /* This is a good place for a line break. */
8801 len_so_far = MAX_ASCII_LEN;
8802 else
8803 len_so_far += 2;
8804 break;
8806 case '\"':
8807 case '\\':
8808 putc ('\\', stream);
8809 len_so_far++;
8810 /* Drop through. */
8812 default:
8813 if (c >= ' ' && c <= '~')
8815 putc (c, stream);
8816 len_so_far++;
8818 else
8820 fprintf (stream, "\\%03o", c);
8821 len_so_far += 4;
8823 break;
8827 fputs ("\"\n", stream);
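/* For illustration (hypothetical input): a 60-character string comes out
   as two directives,

	.ascii	"first fifty-one or so characters..."
	.ascii	"...the remainder"

   because the loop above starts a fresh .ascii once MAX_ASCII_LEN
   output positions have been used on the current line.  */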
8830 /* Compute the register save mask for registers 0 through 12
8831 inclusive. This code is used by arm_compute_save_reg_mask. */
8832 static unsigned long
8833 arm_compute_save_reg0_reg12_mask (void)
8835 unsigned long func_type = arm_current_func_type ();
8836 unsigned int save_reg_mask = 0;
8837 unsigned int reg;
8839 if (IS_INTERRUPT (func_type))
8841 unsigned int max_reg;
8842 /* Interrupt functions must not corrupt any registers,
8843 even call clobbered ones. If this is a leaf function
8844 we can just examine the registers used by the RTL, but
8845 otherwise we have to assume that whatever function is
8846 called might clobber anything, and so we have to save
8847 all the call-clobbered registers as well. */
8848 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8849 /* FIQ handlers have registers r8 - r12 banked, so
8850 we only need to check r0 - r7. Normal ISRs only
8851 bank r14 and r15, so we must check up to r12.
8852 r13 is the stack pointer which is always preserved,
8853 so we do not need to consider it here. */
8854 max_reg = 7;
8855 else
8856 max_reg = 12;
8858 for (reg = 0; reg <= max_reg; reg++)
8859 if (regs_ever_live[reg]
8860 || (! current_function_is_leaf && call_used_regs [reg]))
8861 save_reg_mask |= (1 << reg);
8863 else
8865 /* In the normal case we only need to save those registers
8866 which are call saved and which are used by this function. */
8867 for (reg = 0; reg <= 10; reg++)
8868 if (regs_ever_live[reg] && ! call_used_regs [reg])
8869 save_reg_mask |= (1 << reg);
8871 /* Handle the frame pointer as a special case. */
8872 if (! TARGET_APCS_FRAME
8873 && ! frame_pointer_needed
8874 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8875 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8876 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8878 /* If we aren't loading the PIC register,
8879 don't stack it even though it may be live. */
8880 if (flag_pic
8881 && ! TARGET_SINGLE_PIC_BASE
8882 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8883 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8886 return save_reg_mask;
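/* For illustration (hypothetical function): a non-leaf IRQ handler that
   uses only r0 and r4 still gets r0-r3 and ip added to its mask, since
   any function it calls may clobber the call-used registers.  A leaf
   FIQ handler saves just the registers it actually touches, and never
   anything above r7, because r8-r12 are banked in FIQ mode.  */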
8889 /* Compute a bit mask of which registers need to be
8890 saved on the stack for the current function. */
8892 static unsigned long
8893 arm_compute_save_reg_mask (void)
8895 unsigned int save_reg_mask = 0;
8896 unsigned long func_type = arm_current_func_type ();
8898 if (IS_NAKED (func_type))
8899 /* This should never really happen. */
8900 return 0;
8902 /* If we are creating a stack frame, then we must save the frame pointer,
8903 IP (which will hold the old stack pointer), LR and the PC. */
8904 if (frame_pointer_needed)
8905 save_reg_mask |=
8906 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8907 | (1 << IP_REGNUM)
8908 | (1 << LR_REGNUM)
8909 | (1 << PC_REGNUM);
8911 /* Volatile functions do not return, so there
8912 is no need to save any other registers. */
8913 if (IS_VOLATILE (func_type))
8914 return save_reg_mask;
8916 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8918 /* Decide if we need to save the link register.
8919 Interrupt routines have their own banked link register,
8920 so they never need to save it.
8921 Otherwise if we do not use the link register we do not need to save
8922 it. If we are pushing other registers onto the stack however, we
8923 can save an instruction in the epilogue by pushing the link register
8924 now and then popping it back into the PC. This incurs extra memory
8925 accesses though, so we only do it when optimizing for size, and only
8926 if we know that we will not need a fancy return sequence. */
8927 if (regs_ever_live [LR_REGNUM]
8928 || (save_reg_mask
8929 && optimize_size
8930 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8931 save_reg_mask |= 1 << LR_REGNUM;
8933 if (cfun->machine->lr_save_eliminated)
8934 save_reg_mask &= ~ (1 << LR_REGNUM);
8936 if (TARGET_REALLY_IWMMXT
8937 && ((bit_count (save_reg_mask)
8938 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8940 unsigned int reg;
8942 /* The total number of registers that are going to be pushed
8943 onto the stack is odd. We need to ensure that the stack
8944 is 64-bit aligned before we start to save iWMMXt registers,
8945 and also before we start to create locals. (A local variable
8946 might be a double or long long which we will load/store using
8947 an iWMMXt instruction). Therefore we need to push another
8948 ARM register, so that the stack will be 64-bit aligned. We
8949 try to avoid using the arg registers (r0 -r3) as they might be
8950 used to pass values in a tail call. */
8951 for (reg = 4; reg <= 12; reg++)
8952 if ((save_reg_mask & (1 << reg)) == 0)
8953 break;
8955 if (reg <= 12)
8956 save_reg_mask |= (1 << reg);
8957 else
8959 cfun->machine->sibcall_blocked = 1;
8960 save_reg_mask |= (1 << 3);
8964 return save_reg_mask;
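/* For illustration (hypothetical function): on an iWMMXt target a
   function saving {r4, r5, lr} with no pretend args would push an odd
   number of words, so the loop above picks the first free register --
   r6 here -- as padding, making the push 16 bytes and keeping the
   stack 64-bit aligned.  */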
8968 /* Return the number of bytes required to save VFP registers. */
8969 static int
8970 arm_get_vfp_saved_size (void)
8972 unsigned int regno;
8973 int count;
8974 int saved;
8976 saved = 0;
8977 /* Space for saved VFP registers. */
8978 if (TARGET_HARD_FLOAT && TARGET_VFP)
8980 count = 0;
8981 for (regno = FIRST_VFP_REGNUM;
8982 regno < LAST_VFP_REGNUM;
8983 regno += 2)
8985 if ((!regs_ever_live[regno] || call_used_regs[regno])
8986 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
8988 if (count > 0)
8990 /* Workaround ARM10 VFPr1 bug. */
8991 if (count == 2 && !arm_arch6)
8992 count++;
8993 saved += count * 8 + 4;
8995 count = 0;
8997 else
8998 count++;
9000 if (count > 0)
9002 if (count == 2 && !arm_arch6)
9003 count++;
9004 saved += count * 8 + 4;
9007 return saved;
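/* Worked example (hypothetical register usage): if d8 and d9 are the
   only call-saved VFP registers live, the block count is 2; on a
   pre-arm_arch6 core the ARM10 VFPr1 workaround bumps this to 3, so
   the function reports 3 * 8 + 4 = 28 bytes rather than 20.  */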
9011 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9012 everything bar the final return instruction. */
9013 const char *
9014 output_return_instruction (rtx operand, int really_return, int reverse)
9016 char conditional[10];
9017 char instr[100];
9018 int reg;
9019 unsigned long live_regs_mask;
9020 unsigned long func_type;
9021 arm_stack_offsets *offsets;
9023 func_type = arm_current_func_type ();
9025 if (IS_NAKED (func_type))
9026 return "";
9028 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9030 /* If this function was declared non-returning, and we have
9031 found a tail call, then we have to trust that the called
9032 function won't return. */
9033 if (really_return)
9035 rtx ops[2];
9037 /* Otherwise, trap an attempted return by aborting. */
9038 ops[0] = operand;
9039 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
9040 : "abort");
9041 assemble_external_libcall (ops[1]);
9042 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
9045 return "";
9048 if (current_function_calls_alloca && !really_return)
9049 abort ();
9051 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
9053 return_used_this_function = 1;
9055 live_regs_mask = arm_compute_save_reg_mask ();
9057 if (live_regs_mask)
9059 const char * return_reg;
9061 /* If we do not have any special requirements for function exit
9062 (eg interworking, or ISR) then we can load the return address
9063 directly into the PC. Otherwise we must load it into LR. */
9064 if (really_return
9065 && ! TARGET_INTERWORK)
9066 return_reg = reg_names[PC_REGNUM];
9067 else
9068 return_reg = reg_names[LR_REGNUM];
9070 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
9072 /* There are three possible reasons for the IP register
9073 being saved. 1) a stack frame was created, in which case
9074 IP contains the old stack pointer, or 2) an ISR routine
9075 corrupted it, or 3) it was saved to align the stack on
9076 iWMMXt. In case 1, restore IP into SP, otherwise just
9077 restore IP. */
9078 if (frame_pointer_needed)
9080 live_regs_mask &= ~ (1 << IP_REGNUM);
9081 live_regs_mask |= (1 << SP_REGNUM);
9083 else
9085 if (! IS_INTERRUPT (func_type)
9086 && ! TARGET_REALLY_IWMMXT)
9087 abort ();
9091 /* On some ARM architectures it is faster to use LDR rather than
9092 LDM to load a single register. On other architectures, the
9093 cost is the same. In 26 bit mode, or for exception handlers,
9094 we have to use LDM to load the PC so that the CPSR is also
9095 restored. */
9096 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9098 if (live_regs_mask == (unsigned int)(1 << reg))
9099 break;
9101 if (reg <= LAST_ARM_REGNUM
9102 && (reg != LR_REGNUM
9103 || ! really_return
9104 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
9106 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
9107 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
9109 else
9111 char *p;
9112 int first = 1;
9114 /* Generate the load multiple instruction to restore the
9115 registers. Note we can get here, even if
9116 frame_pointer_needed is true, but only if sp already
9117 points to the base of the saved core registers. */
9118 if (live_regs_mask & (1 << SP_REGNUM))
9120 unsigned HOST_WIDE_INT stack_adjust;
9122 offsets = arm_get_frame_offsets ();
9123 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
9124 if (stack_adjust != 0 && stack_adjust != 4)
9125 abort ();
9127 if (stack_adjust && arm_arch5)
9128 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
9129 else
9131 /* If we can't use ldmib (SA110 bug), then try to pop r3
9132 instead. */
9133 if (stack_adjust)
9134 live_regs_mask |= 1 << 3;
9135 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
9138 else
9139 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
9141 p = instr + strlen (instr);
9143 for (reg = 0; reg <= SP_REGNUM; reg++)
9144 if (live_regs_mask & (1 << reg))
9146 int l = strlen (reg_names[reg]);
9148 if (first)
9149 first = 0;
9150 else
9152 memcpy (p, ", ", 2);
9153 p += 2;
9156 memcpy (p, "%|", 2);
9157 memcpy (p + 2, reg_names[reg], l);
9158 p += l + 2;
9161 if (live_regs_mask & (1 << LR_REGNUM))
9163 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
9164 /* Decide if we need to add the ^ symbol to the end of the
9165 register list. This causes the saved condition codes
9166 register to be copied into the current condition codes
9167 register. We do the copy if we are conforming to the 32-bit
9168 ABI and this is an interrupt function, or if we are
9169 conforming to the 26-bit ABI. There is a special case for
9170 the 26-bit ABI however, which is if we are writing back the
9171 stack pointer but not loading the PC. In this case adding
9172 the ^ symbol would create a type 2 LDM instruction, where
9173 writeback is UNPREDICTABLE. We are safe in leaving the ^
9174 character off in this case however, since the actual return
9175 instruction will be a MOVS which will restore the CPSR. */
9176 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
9177 || (! TARGET_APCS_32 && really_return))
9178 strcat (p, "^");
9180 else
9181 strcpy (p, "}");
9184 output_asm_insn (instr, & operand);
9186 /* See if we need to generate an extra instruction to
9187 perform the actual function return. */
9188 if (really_return
9189 && func_type != ARM_FT_INTERWORKED
9190 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
9192 /* The return has already been handled
9193 by loading the LR into the PC. */
9194 really_return = 0;
9198 if (really_return)
9200 switch ((int) ARM_FUNC_TYPE (func_type))
9202 case ARM_FT_ISR:
9203 case ARM_FT_FIQ:
9204 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9205 break;
9207 case ARM_FT_INTERWORKED:
9208 sprintf (instr, "bx%s\t%%|lr", conditional);
9209 break;
9211 case ARM_FT_EXCEPTION:
9212 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9213 break;
9215 default:
9216 /* ARMv5 implementations always provide BX, so interworking
9217 is the default unless APCS-26 is in use. */
9218 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
9219 sprintf (instr, "bx%s\t%%|lr", conditional);
9220 else
9221 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
9222 conditional, TARGET_APCS_32 ? "" : "s");
9223 break;
9226 output_asm_insn (instr, & operand);
9229 return "";
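/* For illustration (hypothetical function, APCS-32, no interworking): a
   normal function that pushed {r4, lr} returns with the single
   instruction

	ldmfd	sp!, {r4, pc}

   and a function that saved only LR takes the faster single-register
   path chosen above:

	ldr	pc, [sp], #4  */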
9232 /* Write the function name into the code section, directly preceding
9233 the function prologue.
9235 Code will be output similar to this:
9236 t0
9237 .ascii "arm_poke_function_name", 0
9238 .align
9239 t1
9240 .word 0xff000000 + (t1 - t0)
9241 arm_poke_function_name
9242 mov ip, sp
9243 stmfd sp!, {fp, ip, lr, pc}
9244 sub fp, ip, #4
9246 When performing a stack backtrace, code can inspect the value
9247 of 'pc' stored at 'fp' + 0. If the trace function then looks
9248 at location pc - 12 and the top 8 bits are set, then we know
9249 that there is a function name embedded immediately preceding this
9250 location, whose length is given by ((pc[-3]) & ~0xff000000).
9252 We assume that pc is declared as a pointer to an unsigned long.
9254 It is of no benefit to output the function name if we are assembling
9255 a leaf function. These function types will not contain a stack
9256 backtrace structure, therefore it is not possible to determine the
9257 function name. */
9258 void
9259 arm_poke_function_name (FILE *stream, const char *name)
9261 unsigned long alignlength;
9262 unsigned long length;
9263 rtx x;
9265 length = strlen (name) + 1;
9266 alignlength = ROUND_UP_WORD (length);
9268 ASM_OUTPUT_ASCII (stream, name, length);
9269 ASM_OUTPUT_ALIGN (stream, 2);
9270 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9271 assemble_aligned_integer (UNITS_PER_WORD, x);
9274 /* Place some comments into the assembler stream
9275 describing the current function. */
9276 static void
9277 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9279 unsigned long func_type;
9281 if (!TARGET_ARM)
9283 thumb_output_function_prologue (f, frame_size);
9284 return;
9287 /* Sanity check. */
9288 if (arm_ccfsm_state || arm_target_insn)
9289 abort ();
9291 func_type = arm_current_func_type ();
9293 switch ((int) ARM_FUNC_TYPE (func_type))
9295 default:
9296 case ARM_FT_NORMAL:
9297 break;
9298 case ARM_FT_INTERWORKED:
9299 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9300 break;
9301 case ARM_FT_EXCEPTION_HANDLER:
9302 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
9303 break;
9304 case ARM_FT_ISR:
9305 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9306 break;
9307 case ARM_FT_FIQ:
9308 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9309 break;
9310 case ARM_FT_EXCEPTION:
9311 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9312 break;
9315 if (IS_NAKED (func_type))
9316 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9318 if (IS_VOLATILE (func_type))
9319 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9321 if (IS_NESTED (func_type))
9322 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9324 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9325 current_function_args_size,
9326 current_function_pretend_args_size, frame_size);
9328 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9329 frame_pointer_needed,
9330 cfun->machine->uses_anonymous_args);
9332 if (cfun->machine->lr_save_eliminated)
9333 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9335 #ifdef AOF_ASSEMBLER
9336 if (flag_pic)
9337 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9338 #endif
9340 return_used_this_function = 0;
9343 const char *
9344 arm_output_epilogue (rtx sibling)
9346 int reg;
9347 unsigned long saved_regs_mask;
9348 unsigned long func_type;
9349 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9350 frame that is $fp + 4 for a non-variadic function. */
9351 int floats_offset = 0;
9352 rtx operands[3];
9353 FILE * f = asm_out_file;
9354 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9355 unsigned int lrm_count = 0;
9356 int really_return = (sibling == NULL);
9357 int start_reg;
9358 arm_stack_offsets *offsets;
9360 /* If we have already generated the return instruction
9361 then it is futile to generate anything else. */
9362 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9363 return "";
9365 func_type = arm_current_func_type ();
9367 if (IS_NAKED (func_type))
9368 /* Naked functions don't have epilogues. */
9369 return "";
9371 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9373 rtx op;
9375 /* A volatile function should never return. Call abort. */
9376 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9377 assemble_external_libcall (op);
9378 output_asm_insn ("bl\t%a0", &op);
9380 return "";
9383 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
9384 && ! really_return)
9385 /* If we are throwing an exception, then we really must
9386 be doing a return, so we can't tail-call. */
9387 abort ();
9389 offsets = arm_get_frame_offsets ();
9390 saved_regs_mask = arm_compute_save_reg_mask ();
9392 if (TARGET_IWMMXT)
9393 lrm_count = bit_count (saved_regs_mask);
9395 floats_offset = offsets->saved_args;
9396 /* Compute how far away the floats will be. */
9397 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9398 if (saved_regs_mask & (1 << reg))
9399 floats_offset += 4;
9401 if (frame_pointer_needed)
9403 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9404 int vfp_offset = offsets->frame;
9406 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9408 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9409 if (regs_ever_live[reg] && !call_used_regs[reg])
9411 floats_offset += 12;
9412 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9413 reg, FP_REGNUM, floats_offset - vfp_offset);
9416 else
9418 start_reg = LAST_FPA_REGNUM;
9420 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9422 if (regs_ever_live[reg] && !call_used_regs[reg])
9424 floats_offset += 12;
9426 /* We can't unstack more than four registers at once. */
9427 if (start_reg - reg == 3)
9429 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9430 reg, FP_REGNUM, floats_offset - vfp_offset);
9431 start_reg = reg - 1;
9434 else
9436 if (reg != start_reg)
9437 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9438 reg + 1, start_reg - reg,
9439 FP_REGNUM, floats_offset - vfp_offset);
9440 start_reg = reg - 1;
9444 /* Just in case the last register checked also needs unstacking. */
9445 if (reg != start_reg)
9446 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9447 reg + 1, start_reg - reg,
9448 FP_REGNUM, floats_offset - vfp_offset);
9451 if (TARGET_HARD_FLOAT && TARGET_VFP)
9453 int saved_size;
9455 /* The fldmx insn does not have base+offset addressing modes,
9456 so we use IP to hold the address. */
9457 saved_size = arm_get_vfp_saved_size ();
9459 if (saved_size > 0)
9461 floats_offset += saved_size;
9462 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9463 FP_REGNUM, floats_offset - vfp_offset);
9465 start_reg = FIRST_VFP_REGNUM;
9466 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9468 if ((!regs_ever_live[reg] || call_used_regs[reg])
9469 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9471 if (start_reg != reg)
9472 arm_output_fldmx (f, IP_REGNUM,
9473 (start_reg - FIRST_VFP_REGNUM) / 2,
9474 (reg - start_reg) / 2);
9475 start_reg = reg + 2;
9478 if (start_reg != reg)
9479 arm_output_fldmx (f, IP_REGNUM,
9480 (start_reg - FIRST_VFP_REGNUM) / 2,
9481 (reg - start_reg) / 2);
9484 if (TARGET_IWMMXT)
9486 /* The frame pointer is guaranteed to be non-double-word aligned.
9487 This is because it is set to (old_stack_pointer - 4) and the
9488 old_stack_pointer was double word aligned. Thus the offset to
9489 the iWMMXt registers to be loaded must also be non-double-word
9490 sized, so that the resultant address *is* double-word aligned.
9491 We can ignore floats_offset since that was already included in
9492 the live_regs_mask. */
9493 lrm_count += (lrm_count % 2 ? 2 : 1);
9495 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
9496 if (regs_ever_live[reg] && !call_used_regs[reg])
9498 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9499 reg, FP_REGNUM, lrm_count * 4);
9500 lrm_count += 2;
9504 /* saved_regs_mask should contain the IP, which at the time of stack
9505 frame generation actually contains the old stack pointer. So a
9506 quick way to unwind the stack is just pop the IP register directly
9507 into the stack pointer. */
9508 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9509 abort ();
9510 saved_regs_mask &= ~ (1 << IP_REGNUM);
9511 saved_regs_mask |= (1 << SP_REGNUM);
9513 /* There are two registers left in saved_regs_mask - LR and PC. We
9514 only need to restore the LR register (the return address), but to
9515 save time we can load it directly into the PC, unless we need a
9516 special function exit sequence, or we are not really returning. */
9517 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
9518 /* Delete the LR from the register mask, so that the LR on
9519 the stack is loaded into the PC in the register mask. */
9520 saved_regs_mask &= ~ (1 << LR_REGNUM);
9521 else
9522 saved_regs_mask &= ~ (1 << PC_REGNUM);
9524 /* We must use SP as the base register, because SP is one of the
9525 registers being restored. If an interrupt or page fault
9526 happens in the ldm instruction, the SP might or might not
9527 have been restored. That would be bad, as then SP will no
9528 longer indicate the safe area of stack, and we can get stack
9529 corruption. Using SP as the base register means that it will
9530 be reset correctly to the original value, should an interrupt
9531 occur. If the stack pointer already points at the right
9532 place, then omit the subtraction. */
9533 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
9534 || current_function_calls_alloca)
9535 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9536 4 * bit_count (saved_regs_mask));
9537 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9539 if (IS_INTERRUPT (func_type))
9540 /* Interrupt handlers will have pushed the
9541 IP onto the stack, so restore it now. */
9542 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9544 else
9546 /* Restore stack pointer if necessary. */
9547 if (offsets->outgoing_args != offsets->saved_regs)
9549 operands[0] = operands[1] = stack_pointer_rtx;
9550 operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
9551 output_add_immediate (operands);
9554 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9556 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9557 if (regs_ever_live[reg] && !call_used_regs[reg])
9558 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9559 reg, SP_REGNUM);
9561 else
9563 start_reg = FIRST_FPA_REGNUM;
9565 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9567 if (regs_ever_live[reg] && !call_used_regs[reg])
9569 if (reg - start_reg == 3)
9571 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9572 start_reg, SP_REGNUM);
9573 start_reg = reg + 1;
9576 else
9578 if (reg != start_reg)
9579 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9580 start_reg, reg - start_reg,
9581 SP_REGNUM);
9583 start_reg = reg + 1;
9587 /* Just in case the last register checked also needs unstacking. */
9588 if (reg != start_reg)
9589 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9590 start_reg, reg - start_reg, SP_REGNUM);
9593 if (TARGET_HARD_FLOAT && TARGET_VFP)
9595 start_reg = FIRST_VFP_REGNUM;
9596 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9598 if ((!regs_ever_live[reg] || call_used_regs[reg])
9599 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9601 if (start_reg != reg)
9602 arm_output_fldmx (f, SP_REGNUM,
9603 (start_reg - FIRST_VFP_REGNUM) / 2,
9604 (reg - start_reg) / 2);
9605 start_reg = reg + 2;
9608 if (start_reg != reg)
9609 arm_output_fldmx (f, SP_REGNUM,
9610 (start_reg - FIRST_VFP_REGNUM) / 2,
9611 (reg - start_reg) / 2);
9613 if (TARGET_IWMMXT)
9614 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9615 if (regs_ever_live[reg] && !call_used_regs[reg])
9616 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
9618 /* If we can, restore the LR into the PC. */
9619 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9620 && really_return
9621 && current_function_pretend_args_size == 0
9622 && saved_regs_mask & (1 << LR_REGNUM))
9624 saved_regs_mask &= ~ (1 << LR_REGNUM);
9625 saved_regs_mask |= (1 << PC_REGNUM);
9628 /* Load the registers off the stack. If we only have one register
9629 to load, use the LDR instruction - it is faster. */
9630 if (saved_regs_mask == (1 << LR_REGNUM))
9632 /* The exception handler ignores the LR, so we do
9633 not really need to load it off the stack. */
9634 if (eh_ofs)
9635 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
9636 else
9637 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9639 else if (saved_regs_mask)
9641 if (saved_regs_mask & (1 << SP_REGNUM))
9642 /* Note - write back to the stack register is not enabled
9643 (ie "ldmfd sp!..."). We know that the stack pointer is
9644 in the list of registers and if we add writeback the
9645 instruction becomes UNPREDICTABLE. */
9646 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9647 else
9648 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9651 if (current_function_pretend_args_size)
9653 /* Unwind the pre-pushed regs. */
9654 operands[0] = operands[1] = stack_pointer_rtx;
9655 operands[2] = GEN_INT (current_function_pretend_args_size);
9656 output_add_immediate (operands);
9660 if (! really_return
9661 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9662 && current_function_pretend_args_size == 0
9663 && saved_regs_mask & (1 << PC_REGNUM)))
9664 return "";
9666 /* Generate the return instruction. */
9667 switch ((int) ARM_FUNC_TYPE (func_type))
9669 case ARM_FT_EXCEPTION_HANDLER:
9670 /* Even in 26-bit mode we do a mov (rather than a movs)
9671 because we don't have the PSR bits set in the address. */
9672 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
9673 break;
9675 case ARM_FT_ISR:
9676 case ARM_FT_FIQ:
9677 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9678 break;
9680 case ARM_FT_EXCEPTION:
9681 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9682 break;
9684 case ARM_FT_INTERWORKED:
9685 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9686 break;
9688 default:
9689 if (frame_pointer_needed)
9690 /* If we used the frame pointer then the return address
9691 will have been loaded off the stack directly into the
9692 PC, so there is no need to issue a MOV instruction
9693 here. */
9695 else if (current_function_pretend_args_size == 0
9696 && (saved_regs_mask & (1 << LR_REGNUM)))
9697 /* Similarly we may have been able to load LR into the PC
9698 even if we did not create a stack frame. */
9700 else if (TARGET_APCS_32)
9701 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9702 else
9703 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9704 break;
9707 return "";
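/* For illustration (hypothetical function): for a normal APCS frame that
   saved {fp, ip, lr, pc}, the code above rewrites the mask to
   {fp, sp, pc} and emits, roughly, the classic frame-pointer epilogue

	sub	sp, fp, #12
	ldmfd	sp, {fp, sp, pc}

   restoring SP from the saved-IP slot and returning in one ldm.  */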
9710 static void
9711 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9712 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
9714 arm_stack_offsets *offsets;
9716 if (TARGET_THUMB)
9718 /* ??? Probably not safe to set this here, since it assumes that a
9719 function will be emitted as assembly immediately after we generate
9720 RTL for it. This does not happen for inline functions. */
9721 return_used_this_function = 0;
9723 else
9725 /* We need to take into account any stack-frame rounding. */
9726 offsets = arm_get_frame_offsets ();
9728 if (use_return_insn (FALSE, NULL)
9729 && return_used_this_function
9730 && offsets->saved_regs != offsets->outgoing_args
9731 && !frame_pointer_needed)
9732 abort ();
9734 /* Reset the ARM-specific per-function variables. */
9735 after_arm_reorg = 0;
9739 /* Generate and emit an insn that we will recognize as a push_multi.
9740 Unfortunately, since this insn does not reflect very well the actual
9741 semantics of the operation, we need to annotate the insn for the benefit
9742 of DWARF2 frame unwind information. */
9743 static rtx
9744 emit_multi_reg_push (int mask)
9746 int num_regs = 0;
9747 int num_dwarf_regs;
9748 int i, j;
9749 rtx par;
9750 rtx dwarf;
9751 int dwarf_par_index;
9752 rtx tmp, reg;
9754 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9755 if (mask & (1 << i))
9756 num_regs++;
9758 if (num_regs == 0 || num_regs > 16)
9759 abort ();
9761 /* We don't record the PC in the dwarf frame information. */
9762 num_dwarf_regs = num_regs;
9763 if (mask & (1 << PC_REGNUM))
9764 num_dwarf_regs--;
9766 /* For the body of the insn we are going to generate an UNSPEC in
9767 parallel with several USEs. This allows the insn to be recognized
9768 by the push_multi pattern in the arm.md file. The insn looks
9769 something like this:
9771 (parallel [
9772 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9773 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9774 (use (reg:SI 11 fp))
9775 (use (reg:SI 12 ip))
9776 (use (reg:SI 14 lr))
9777 (use (reg:SI 15 pc))
9780 For the frame note however, we try to be more explicit and actually
9781 show each register being stored into the stack frame, plus a (single)
9782 decrement of the stack pointer. We do it this way in order to be
9783 friendly to the stack unwinding code, which only wants to see a single
9784 stack decrement per instruction. The RTL we generate for the note looks
9785 something like this:
9787 (sequence [
9788 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9789 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9790 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9791 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9792 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9795 This sequence is used both by the code to support stack unwinding for
9796 exception handlers and the code to generate dwarf2 frame debugging. */
9798 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9799 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9800 dwarf_par_index = 1;
9802 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9804 if (mask & (1 << i))
9806 reg = gen_rtx_REG (SImode, i);
9808 XVECEXP (par, 0, 0)
9809 = gen_rtx_SET (VOIDmode,
9810 gen_rtx_MEM (BLKmode,
9811 gen_rtx_PRE_DEC (BLKmode,
9812 stack_pointer_rtx)),
9813 gen_rtx_UNSPEC (BLKmode,
9814 gen_rtvec (1, reg),
9815 UNSPEC_PUSH_MULT));
9817 if (i != PC_REGNUM)
9819 tmp = gen_rtx_SET (VOIDmode,
9820 gen_rtx_MEM (SImode, stack_pointer_rtx),
9821 reg);
9822 RTX_FRAME_RELATED_P (tmp) = 1;
9823 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9824 dwarf_par_index++;
9827 break;
9831 for (j = 1, i++; j < num_regs; i++)
9833 if (mask & (1 << i))
9835 reg = gen_rtx_REG (SImode, i);
9837 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9839 if (i != PC_REGNUM)
9841 tmp = gen_rtx_SET (VOIDmode,
9842 gen_rtx_MEM (SImode,
9843 plus_constant (stack_pointer_rtx,
9844 4 * j)),
9845 reg);
9846 RTX_FRAME_RELATED_P (tmp) = 1;
9847 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9850 j++;
9854 par = emit_insn (par);
9856 tmp = gen_rtx_SET (SImode,
9857 stack_pointer_rtx,
9858 gen_rtx_PLUS (SImode,
9859 stack_pointer_rtx,
9860 GEN_INT (-4 * num_regs)));
9861 RTX_FRAME_RELATED_P (tmp) = 1;
9862 XVECEXP (dwarf, 0, 0) = tmp;
9864 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9865 REG_NOTES (par));
9866 return par;
9869 static rtx
9870 emit_sfm (int base_reg, int count)
9872 rtx par;
9873 rtx dwarf;
9874 rtx tmp, reg;
9875 int i;
9877 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9878 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9880 reg = gen_rtx_REG (XFmode, base_reg++);
9882 XVECEXP (par, 0, 0)
9883 = gen_rtx_SET (VOIDmode,
9884 gen_rtx_MEM (BLKmode,
9885 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9886 gen_rtx_UNSPEC (BLKmode,
9887 gen_rtvec (1, reg),
9888 UNSPEC_PUSH_MULT));
9890 tmp = gen_rtx_SET (VOIDmode,
9891 gen_rtx_MEM (XFmode,
9892 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9893 reg);
9894 RTX_FRAME_RELATED_P (tmp) = 1;
9895 XVECEXP (dwarf, 0, count - 1) = tmp;
9897 for (i = 1; i < count; i++)
9899 reg = gen_rtx_REG (XFmode, base_reg++);
9900 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9902 tmp = gen_rtx_SET (VOIDmode,
9903 gen_rtx_MEM (XFmode,
9904 gen_rtx_PRE_DEC (BLKmode,
9905 stack_pointer_rtx)),
9906 reg);
9907 RTX_FRAME_RELATED_P (tmp) = 1;
9908 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9911 par = emit_insn (par);
9912 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9913 REG_NOTES (par));
9914 return par;
9918 /* Compute the distance from register FROM to register TO.
9919 These can be the arg pointer (26), the soft frame pointer (25),
9920 the stack pointer (13) or the hard frame pointer (11).
9921 Typical stack layout looks like this:
9923 old stack pointer -> | |
9924 ----
9925 | | \
9926 | | saved arguments for
9927 | | vararg functions
9928 | | /
9930 hard FP & arg pointer -> | | \
9931 | | stack
9932 | | frame
9933 | | /
9935 | | \
9936 | | call saved
9937 | | registers
9938 soft frame pointer -> | | /
9940 | | \
9941 | | local
9942 | | variables
9943 | | /
9945 | | \
9946 | | outgoing
9947 | | arguments
9948 current stack pointer -> | | /
9951 For a given function some or all of these stack components
9952 may not be needed, giving rise to the possibility of
9953 eliminating some of the registers.
9955 The values returned by this function must reflect the behavior
9956 of arm_expand_prologue() and arm_compute_save_reg_mask().
9958 The sign of the number returned reflects the direction of stack
9959 growth, so the values are positive for all eliminations except
9960 from the soft frame pointer to the hard frame pointer.
9962 SFP may point just inside the local variables block to ensure correct
9963 alignment. */
9966 /* Calculate stack offsets. These are used to calculate register elimination
9967 offsets and in prologue/epilogue code. */
9969 static arm_stack_offsets *
9970 arm_get_frame_offsets (void)
9972 struct arm_stack_offsets *offsets;
9973 unsigned long func_type;
9974 int leaf;
9975 int saved;
9976 HOST_WIDE_INT frame_size;
9978 offsets = &cfun->machine->stack_offsets;
9980 /* We need to know if we are a leaf function. Unfortunately, it
9981 is possible to be called after start_sequence has been called,
9982 which causes get_insns to return the insns for the sequence,
9983 not the function, which will cause leaf_function_p to return
9984 the incorrect result. To work around this, the computed offsets
9985 are cached in cfun->machine. We only need
9986 to know about leaf functions once reload has completed, and the
9987 frame size cannot be changed after that time, so we can safely
9988 use the cached value. */
9990 if (reload_completed)
9991 return offsets;
9993 /* Initially this is the size of the local variables. It will be translated
9994 into an offset once we have determined the size of preceding data. */
9995 frame_size = ROUND_UP_WORD (get_frame_size ());
9997 leaf = leaf_function_p ();
9999 /* Space for variadic functions. */
10000 offsets->saved_args = current_function_pretend_args_size;
10002 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
10004 if (TARGET_ARM)
10006 unsigned int regno;
10008 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
10010 /* We know that SP will be doubleword aligned on entry, and we must
10011 preserve that condition at any subroutine call. We also require the
10012 soft frame pointer to be doubleword aligned. */
10014 if (TARGET_REALLY_IWMMXT)
10016 /* Check for the call-saved iWMMXt registers. */
10017 for (regno = FIRST_IWMMXT_REGNUM;
10018 regno <= LAST_IWMMXT_REGNUM;
10019 regno++)
10020 if (regs_ever_live [regno] && ! call_used_regs [regno])
10021 saved += 8;
10024 func_type = arm_current_func_type ();
10025 if (! IS_VOLATILE (func_type))
10027 /* Space for saved FPA registers. */
10028 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
10029 if (regs_ever_live[regno] && ! call_used_regs[regno])
10030 saved += 12;
10032 /* Space for saved VFP registers. */
10033 if (TARGET_HARD_FLOAT && TARGET_VFP)
10034 saved += arm_get_vfp_saved_size ();
10037 else /* TARGET_THUMB */
10039 int reg;
10040 int count_regs;
10042 saved = 0;
10043 count_regs = 0;
10044 for (reg = 8; reg < 13; reg ++)
10045 if (THUMB_REG_PUSHED_P (reg))
10046 count_regs ++;
10047 if (count_regs)
10048 saved += 4 * count_regs;
10049 count_regs = 0;
10050 for (reg = 0; reg <= LAST_LO_REGNUM; reg ++)
10051 if (THUMB_REG_PUSHED_P (reg))
10052 count_regs ++;
10053 if (count_regs || ! leaf_function_p ()
10054 || thumb_far_jump_used_p ())
10055 saved += 4 * (count_regs + 1);
10056 if (TARGET_BACKTRACE)
10058 if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0))
10059 saved += 20;
10060 else
10061 saved += 16;
10065 /* Saved registers include the stack frame. */
10066 offsets->saved_regs = offsets->saved_args + saved;
10067 offsets->soft_frame = offsets->saved_regs;
10068 /* A leaf function does not need any stack alignment if it has nothing
10069 on the stack. */
10070 if (leaf && frame_size == 0)
10072 offsets->outgoing_args = offsets->soft_frame;
10073 return offsets;
10076 /* Ensure SFP has the correct alignment. */
10077 if (ARM_DOUBLEWORD_ALIGN
10078 && (offsets->soft_frame & 7))
10079 offsets->soft_frame += 4;
10081 offsets->outgoing_args = offsets->soft_frame + frame_size
10082 + current_function_outgoing_args_size;
10084 if (ARM_DOUBLEWORD_ALIGN)
10086 /* Ensure SP remains doubleword aligned. */
10087 if (offsets->outgoing_args & 7)
10088 offsets->outgoing_args += 4;
10089 if (offsets->outgoing_args & 7)
10090 abort ();
10093 return offsets;
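/* Worked example (hypothetical ARM function): with no pretend args, a
   frame pointer, {fp, ip, lr, pc} saved and 12 bytes of locals, the
   code above yields saved_args = 0, frame = 4, saved_regs =
   soft_frame = 16 and, with no outgoing arguments, outgoing_args = 28,
   rounded up to 32 when ARM_DOUBLEWORD_ALIGN requires an 8-byte
   aligned SP.  */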
10097 /* Calculate the relative offsets for the different stack pointers. Positive
10098 offsets are in the direction of stack growth. */
10100 unsigned int
10101 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
10103 arm_stack_offsets *offsets;
10105 offsets = arm_get_frame_offsets ();
10107 /* OK, now we have enough information to compute the distances.
10108 There must be an entry in these switch tables for each pair
10109 of registers in ELIMINABLE_REGS, even if some of the entries
10110 seem to be redundant or useless. */
10111 switch (from)
10113 case ARG_POINTER_REGNUM:
10114 switch (to)
10116 case THUMB_HARD_FRAME_POINTER_REGNUM:
10117 return 0;
10119 case FRAME_POINTER_REGNUM:
10120 /* This is the reverse of the soft frame pointer
10121 to hard frame pointer elimination below. */
10122 return offsets->soft_frame - offsets->saved_args;
10124 case ARM_HARD_FRAME_POINTER_REGNUM:
10125 /* If there is no stack frame then the hard
10126 frame pointer and the arg pointer coincide. */
10127 if (offsets->frame == offsets->saved_regs)
10128 return 0;
10129 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10130 return (frame_pointer_needed
10131 && current_function_needs_context
10132 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
10134 case STACK_POINTER_REGNUM:
10135 /* If nothing has been pushed on the stack at all
10136 then this will return -4. This *is* correct! */
10137 return offsets->outgoing_args - (offsets->saved_args + 4);
10139 default:
10140 abort ();
10142 break;
10144 case FRAME_POINTER_REGNUM:
10145 switch (to)
10147 case THUMB_HARD_FRAME_POINTER_REGNUM:
10148 return 0;
10150 case ARM_HARD_FRAME_POINTER_REGNUM:
10151 /* The hard frame pointer points to the top entry in the
10152 stack frame. The soft frame pointer to the bottom entry
10153 in the stack frame. If there is no stack frame at all,
10154 then they are identical. */
10156 return offsets->frame - offsets->soft_frame;
10158 case STACK_POINTER_REGNUM:
10159 return offsets->outgoing_args - offsets->soft_frame;
10161 default:
10162 abort ();
10164 break;
10166 default:
10167 /* You cannot eliminate from the stack pointer.
10168 In theory you could eliminate from the hard frame
10169 pointer to the stack pointer, but this will never
10170 happen, since if a stack frame is not needed the
10171 hard frame pointer will never be used. */
10172 abort ();
10177 /* Generate the prologue instructions for entry into an ARM function. */
10178 void
10179 arm_expand_prologue (void)
10181 int reg;
10182 rtx amount;
10183 rtx insn;
10184 rtx ip_rtx;
10185 unsigned long live_regs_mask;
10186 unsigned long func_type;
10187 int fp_offset = 0;
10188 int saved_pretend_args = 0;
10189 int saved_regs = 0;
10190 unsigned int args_to_push;
10191 arm_stack_offsets *offsets;
10193 func_type = arm_current_func_type ();
10195 /* Naked functions don't have prologues. */
10196 if (IS_NAKED (func_type))
10197 return;
10199 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10200 args_to_push = current_function_pretend_args_size;
10202 /* Compute which registers we will have to save onto the stack. */
10203 live_regs_mask = arm_compute_save_reg_mask ();
10205 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10207 if (frame_pointer_needed)
10209 if (IS_INTERRUPT (func_type))
10211 /* Interrupt functions must not corrupt any registers.
10212 Creating a frame pointer, however, corrupts the IP
10213 register, so we must push it first. */
10214 insn = emit_multi_reg_push (1 << IP_REGNUM);
10216 /* Do not set RTX_FRAME_RELATED_P on this insn.
10217 The dwarf stack unwinding code only wants to see one
10218 stack decrement per function, and this is not it. If
10219 this instruction is labeled as being part of the frame
10220 creation sequence then dwarf2out_frame_debug_expr will
10221 abort when it encounters the assignment of IP to FP
10222 later on, since the use of SP here establishes SP as
10223 the CFA register and not IP.
10225 Anyway this instruction is not really part of the stack
10226 frame creation although it is part of the prologue. */
10228 else if (IS_NESTED (func_type))
10230 /* The static chain register is the same as the IP register,
10231 which is used as a scratch register during stack frame creation.
10232 To get around this we need to find somewhere to store IP
10233 whilst the frame is being created. We try the following
10234 places in order:
10236 1. The last argument register.
10237 2. A slot on the stack above the frame. (This only
10238 works if the function is not a varargs function).
10239 3. Register r3, after pushing the argument registers
10240 onto the stack.
10242 Note - we only need to tell the dwarf2 backend about the SP
10243 adjustment in the second variant; the static chain register
10244 doesn't need to be unwound, as it doesn't contain a value
10245 inherited from the caller. */
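/* A sketch of the code each strategy produces (illustrative only; the
   third case pushes whichever argument registers are live):
   1.  mov   r3, ip
   2.  str   ip, [sp, #-4]!
   3.  stmfd sp!, {r0-r3}   ...followed by...   mov r3, ip  */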
10247 if (regs_ever_live[3] == 0)
10249 insn = gen_rtx_REG (SImode, 3);
10250 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10251 insn = emit_insn (insn);
10253 else if (args_to_push == 0)
10255 rtx dwarf;
10256 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10257 insn = gen_rtx_MEM (SImode, insn);
10258 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10259 insn = emit_insn (insn);
10261 fp_offset = 4;
10263 /* Just tell the dwarf backend that we adjusted SP. */
10264 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10265 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10266 GEN_INT (-fp_offset)));
10267 RTX_FRAME_RELATED_P (insn) = 1;
10268 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10269 dwarf, REG_NOTES (insn));
10271 else
10273 /* Store the args on the stack. */
10274 if (cfun->machine->uses_anonymous_args)
10275 insn = emit_multi_reg_push
10276 ((0xf0 >> (args_to_push / 4)) & 0xf);
10277 else
10278 insn = emit_insn
10279 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10280 GEN_INT (- args_to_push)));
10282 RTX_FRAME_RELATED_P (insn) = 1;
10284 saved_pretend_args = 1;
10285 fp_offset = args_to_push;
10286 args_to_push = 0;
10288 /* Now reuse r3 to preserve IP. */
10289 insn = gen_rtx_REG (SImode, 3);
10290 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10291 (void) emit_insn (insn);
10295 if (fp_offset)
10297 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10298 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10300 else
10301 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10303 insn = emit_insn (insn);
10304 RTX_FRAME_RELATED_P (insn) = 1;
10307 if (args_to_push)
10309 /* Push the argument registers, or reserve space for them. */
10310 if (cfun->machine->uses_anonymous_args)
10311 insn = emit_multi_reg_push
10312 ((0xf0 >> (args_to_push / 4)) & 0xf);
10313 else
10314 insn = emit_insn
10315 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10316 GEN_INT (- args_to_push)));
10317 RTX_FRAME_RELATED_P (insn) = 1;
10320 /* If this is an interrupt service routine, and the link register
10321 is going to be pushed, and we are not creating a stack frame
10322 (which would involve an extra push of IP and a pop in the epilogue),
10323 then subtracting four from LR now will mean that the function return
10324 can be done with a single instruction. */
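/* An illustrative sketch (not emitted verbatim here): with LR
   pre-adjusted, the ISR can return by popping straight into PC with
   "ldmfd sp!, {..., pc}^", instead of popping LR and then issuing a
   separate "subs pc, lr, #4".  */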
10325 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10326 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10327 && ! frame_pointer_needed)
10328 emit_insn (gen_rtx_SET (SImode,
10329 gen_rtx_REG (SImode, LR_REGNUM),
10330 gen_rtx_PLUS (SImode,
10331 gen_rtx_REG (SImode, LR_REGNUM),
10332 GEN_INT (-4))));
10334 if (live_regs_mask)
10336 insn = emit_multi_reg_push (live_regs_mask);
10337 saved_regs += bit_count (live_regs_mask) * 4;
10338 RTX_FRAME_RELATED_P (insn) = 1;
10341 if (TARGET_IWMMXT)
10342 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10343 if (regs_ever_live[reg] && ! call_used_regs [reg])
10345 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10346 insn = gen_rtx_MEM (V2SImode, insn);
10347 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10348 gen_rtx_REG (V2SImode, reg)));
10349 RTX_FRAME_RELATED_P (insn) = 1;
10350 saved_regs += 8;
10353 if (! IS_VOLATILE (func_type))
10355 int start_reg;
10357 /* Save any floating point call-saved registers used by this
10358 function. */
10359 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10361 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10362 if (regs_ever_live[reg] && !call_used_regs[reg])
10364 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10365 insn = gen_rtx_MEM (XFmode, insn);
10366 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10367 gen_rtx_REG (XFmode, reg)));
10368 RTX_FRAME_RELATED_P (insn) = 1;
10369 saved_regs += 12;
10372 else
10374 start_reg = LAST_FPA_REGNUM;
10376 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10378 if (regs_ever_live[reg] && !call_used_regs[reg])
10380 if (start_reg - reg == 3)
10382 insn = emit_sfm (reg, 4);
10383 RTX_FRAME_RELATED_P (insn) = 1;
10384 start_reg = reg - 1;
10387 else
10389 if (start_reg != reg)
10391 insn = emit_sfm (reg + 1, start_reg - reg);
10392 RTX_FRAME_RELATED_P (insn) = 1;
10393 saved_regs += (start_reg - reg) * 12;
10395 start_reg = reg - 1;
10399 if (start_reg != reg)
10401 insn = emit_sfm (reg + 1, start_reg - reg);
10402 saved_regs += (start_reg - reg) * 12;
10403 RTX_FRAME_RELATED_P (insn) = 1;
10406 if (TARGET_HARD_FLOAT && TARGET_VFP)
10408 start_reg = FIRST_VFP_REGNUM;
10410 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10412 if ((!regs_ever_live[reg] || call_used_regs[reg])
10413 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10415 if (start_reg != reg)
10416 saved_regs += vfp_emit_fstmx (start_reg,
10417 (reg - start_reg) / 2);
10418 start_reg = reg + 2;
10421 if (start_reg != reg)
10422 saved_regs += vfp_emit_fstmx (start_reg,
10423 (reg - start_reg) / 2);
10427 if (frame_pointer_needed)
10429 /* Create the new frame pointer. */
10430 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10431 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10432 RTX_FRAME_RELATED_P (insn) = 1;
10434 if (IS_NESTED (func_type))
10436 /* Recover the static chain register. */
10437 if (regs_ever_live [3] == 0
10438 || saved_pretend_args)
10439 insn = gen_rtx_REG (SImode, 3);
10440 else /* if (current_function_pretend_args_size == 0) */
10442 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10443 GEN_INT (4));
10444 insn = gen_rtx_MEM (SImode, insn);
10447 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10448 /* Add a USE to stop propagate_one_insn() from barfing. */
10449 emit_insn (gen_prologue_use (ip_rtx));
10453 offsets = arm_get_frame_offsets ();
10454 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
10456 /* This add can produce multiple insns for a large constant, so we
10457 need to get tricky. */
10458 rtx last = get_last_insn ();
10460 amount = GEN_INT (offsets->saved_args + saved_regs
10461 - offsets->outgoing_args);
10463 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10464 amount));
10465 do
10467 last = last ? NEXT_INSN (last) : get_insns ();
10468 RTX_FRAME_RELATED_P (last) = 1;
10470 while (last != insn);
10472 /* If the frame pointer is needed, emit a special barrier that
10473 will prevent the scheduler from moving stores to the frame
10474 before the stack adjustment. */
10475 if (frame_pointer_needed)
10476 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10477 hard_frame_pointer_rtx));
10480 /* If we are profiling, make sure no instructions are scheduled before
10481 the call to mcount. Similarly if the user has requested no
10482 scheduling in the prolog. */
10483 if (current_function_profile || TARGET_NO_SCHED_PRO)
10484 emit_insn (gen_blockage ());
10486 /* If the link register is being kept alive, with the return address in it,
10487 then make sure that it does not get reused by the ce2 pass. */
10488 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10490 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10491 cfun->machine->lr_save_eliminated = 1;
10495 /* If CODE is 'd', then the X is a condition operand and the instruction
10496 should only be executed if the condition is true.
10497 If CODE is 'D', then the X is a condition operand and the instruction
10498 should only be executed if the condition is false: however, if the mode
10499 of the comparison is CCFPEmode, then always execute the instruction -- we
10500 do this because in these circumstances !GE does not necessarily imply LT;
10501 in these cases the instruction pattern will take care to make sure that
10502 an instruction containing %d will follow, thereby undoing the effects of
10503 doing this instruction unconditionally.
10504 If CODE is 'N' then X is a floating point operand that must be negated
10505 before output.
10506 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10507 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
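/* For instance (an illustrative sketch, not an exhaustive list): with X
   the DImode register pair r0/r1, %M prints "{r0-r1}"; with X the
   CONST_INT 0, %B prints "-1", its bitwise inverse.  */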
10508 void
10509 arm_print_operand (FILE *stream, rtx x, int code)
10511 switch (code)
10513 case '@':
10514 fputs (ASM_COMMENT_START, stream);
10515 return;
10517 case '_':
10518 fputs (user_label_prefix, stream);
10519 return;
10521 case '|':
10522 fputs (REGISTER_PREFIX, stream);
10523 return;
10525 case '?':
10526 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10528 if (TARGET_THUMB || current_insn_predicate != NULL)
10529 abort ();
10531 fputs (arm_condition_codes[arm_current_cc], stream);
10533 else if (current_insn_predicate)
10535 enum arm_cond_code code;
10537 if (TARGET_THUMB)
10538 abort ();
10540 code = get_arm_condition_code (current_insn_predicate);
10541 fputs (arm_condition_codes[code], stream);
10543 return;
10545 case 'N':
10547 REAL_VALUE_TYPE r;
10548 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10549 r = REAL_VALUE_NEGATE (r);
10550 fprintf (stream, "%s", fp_const_from_val (&r));
10552 return;
10554 case 'B':
10555 if (GET_CODE (x) == CONST_INT)
10557 HOST_WIDE_INT val;
10558 val = ARM_SIGN_EXTEND (~INTVAL (x));
10559 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10561 else
10563 putc ('~', stream);
10564 output_addr_const (stream, x);
10566 return;
10568 case 'i':
10569 fprintf (stream, "%s", arithmetic_instr (x, 1));
10570 return;
10572 /* Truncate Cirrus shift counts. */
10573 case 's':
10574 if (GET_CODE (x) == CONST_INT)
10576 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10577 return;
10579 arm_print_operand (stream, x, 0);
10580 return;
10582 case 'I':
10583 fprintf (stream, "%s", arithmetic_instr (x, 0));
10584 return;
10586 case 'S':
10588 HOST_WIDE_INT val;
10589 const char * shift = shift_op (x, &val);
10591 if (shift)
10593 fprintf (stream, ", %s ", shift_op (x, &val));
10594 if (val == -1)
10595 arm_print_operand (stream, XEXP (x, 1), 0);
10596 else
10597 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10600 return;
10602 /* An explanation of the 'Q', 'R' and 'H' register operands:
10604 In a pair of registers containing a DI or DF value the 'Q'
10605 operand returns the register number of the register containing
10606 the least significant part of the value. The 'R' operand returns
10607 the register number of the register containing the most
10608 significant part of the value.
10610 The 'H' operand returns the higher of the two register numbers.
10611 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10612 same as the 'Q' operand, since the most significant part of the
10613 value is held in the lower number register. The reverse is true
10614 on systems where WORDS_BIG_ENDIAN is false.
10616 The purpose of these operands is to distinguish between cases
10617 where the endian-ness of the values is important (for example
10618 when they are added together), and cases where the endian-ness
10619 is irrelevant, but the order of register operations is important.
10620 For example when loading a value from memory into a register
10621 pair, the endian-ness does not matter. Provided that the value
10622 from the lower memory address is put into the lower numbered
10623 register, and the value from the higher address is put into the
10624 higher numbered register, the load will work regardless of whether
10625 the value being loaded is big-wordian or little-wordian. The
10626 order of the two register loads can matter however, if the address
10627 of the memory location is actually held in one of the registers
10628 being overwritten by the load. */
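/* A concrete sketch, assuming little-endian word order: for a DImode
   value held in r0/r1, %Q prints r0 (the least significant word), %R
   prints r1 (the most significant word), and %H also prints r1, the
   higher register number.  When WORDS_BIG_ENDIAN is true, %Q and %R
   swap, but %H still prints r1.  */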
10629 case 'Q':
10630 if (REGNO (x) > LAST_ARM_REGNUM)
10631 abort ();
10632 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10633 return;
10635 case 'R':
10636 if (REGNO (x) > LAST_ARM_REGNUM)
10637 abort ();
10638 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10639 return;
10641 case 'H':
10642 if (REGNO (x) > LAST_ARM_REGNUM)
10643 abort ();
10644 asm_fprintf (stream, "%r", REGNO (x) + 1);
10645 return;
10647 case 'm':
10648 asm_fprintf (stream, "%r",
10649 GET_CODE (XEXP (x, 0)) == REG
10650 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10651 return;
10653 case 'M':
10654 asm_fprintf (stream, "{%r-%r}",
10655 REGNO (x),
10656 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10657 return;
10659 case 'd':
10660 /* CONST_TRUE_RTX means always -- that's the default. */
10661 if (x == const_true_rtx)
10662 return;
10664 fputs (arm_condition_codes[get_arm_condition_code (x)],
10665 stream);
10666 return;
10668 case 'D':
10669 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10670 want to do that. */
10671 if (x == const_true_rtx)
10672 abort ();
10674 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10675 (get_arm_condition_code (x))],
10676 stream);
10677 return;
10679 /* Cirrus registers can be accessed in a variety of ways:
10680 single floating point (f)
10681 double floating point (d)
10682 32-bit integer (fx)
10683 64-bit integer (dx). */
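/* E.g., assuming the usual "mv0".."mv15" hard register names, printing
   register mv0 with code 'W' yields "mvf0" and with code 'Z' yields
   "mvdx0" (a sketch of the formatting done below).  */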
10684 case 'W': /* Cirrus register in F mode. */
10685 case 'X': /* Cirrus register in D mode. */
10686 case 'Y': /* Cirrus register in FX mode. */
10687 case 'Z': /* Cirrus register in DX mode. */
10688 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10689 abort ();
10691 fprintf (stream, "mv%s%s",
10692 code == 'W' ? "f"
10693 : code == 'X' ? "d"
10694 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10696 return;
10698 /* Print a Cirrus register in the mode specified by the register's mode. */
10699 case 'V':
10701 int mode = GET_MODE (x);
10703 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10704 abort ();
10706 fprintf (stream, "mv%s%s",
10707 mode == DFmode ? "d"
10708 : mode == SImode ? "fx"
10709 : mode == DImode ? "dx"
10710 : "f", reg_names[REGNO (x)] + 2);
10712 return;
10715 case 'U':
10716 if (GET_CODE (x) != REG
10717 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10718 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10719 /* Bad value for wCG register number. */
10720 abort ();
10721 else
10722 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10723 return;
10725 /* Print an iWMMXt control register name. */
10726 case 'w':
10727 if (GET_CODE (x) != CONST_INT
10728 || INTVAL (x) < 0
10729 || INTVAL (x) >= 16)
10730 /* Bad value for wC register number. */
10731 abort ();
10732 else
10734 static const char * wc_reg_names [16] =
10736 "wCID", "wCon", "wCSSF", "wCASF",
10737 "wC4", "wC5", "wC6", "wC7",
10738 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10739 "wC12", "wC13", "wC14", "wC15"
10742 fputs (wc_reg_names [INTVAL (x)], stream); /* Not a printf format. */
10744 return;
10746 /* Print a VFP double precision register name. */
10747 case 'P':
10749 int mode = GET_MODE (x);
10750 int num;
10752 if (mode != DImode && mode != DFmode)
10753 abort ();
10755 if (GET_CODE (x) != REG
10756 || !IS_VFP_REGNUM (REGNO (x)))
10757 abort ();
10759 num = REGNO (x) - FIRST_VFP_REGNUM;
10760 if (num & 1)
10761 abort ();
10763 fprintf (stream, "d%d", num >> 1);
10765 return;
10767 default:
10768 if (x == 0)
10769 abort ();
10771 if (GET_CODE (x) == REG)
10772 asm_fprintf (stream, "%r", REGNO (x));
10773 else if (GET_CODE (x) == MEM)
10775 output_memory_reference_mode = GET_MODE (x);
10776 output_address (XEXP (x, 0));
10778 else if (GET_CODE (x) == CONST_DOUBLE)
10779 fprintf (stream, "#%s", fp_immediate_constant (x));
10780 else if (GET_CODE (x) == NEG)
10781 abort (); /* This should never happen now. */
10782 else
10784 fputc ('#', stream);
10785 output_addr_const (stream, x);
10790 #ifndef AOF_ASSEMBLER
10791 /* Target hook for assembling integer objects. The ARM version needs to
10792 handle word-sized values specially. */
10793 static bool
10794 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10796 if (size == UNITS_PER_WORD && aligned_p)
10798 fputs ("\t.word\t", asm_out_file);
10799 output_addr_const (asm_out_file, x);
10801 /* Mark symbols as position independent. We only do this in the
10802 .text segment, not in the .data segment. */
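/* For example, a reference to a local symbol in the constant pool is
   emitted as ".word sym(GOTOFF)", while a reference to a global symbol
   becomes ".word sym(GOT)" (an illustrative sketch of the output
   below).  */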
10803 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10804 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10806 if (GET_CODE (x) == SYMBOL_REF
10807 && (CONSTANT_POOL_ADDRESS_P (x)
10808 || SYMBOL_REF_LOCAL_P (x)))
10809 fputs ("(GOTOFF)", asm_out_file);
10810 else if (GET_CODE (x) == LABEL_REF)
10811 fputs ("(GOTOFF)", asm_out_file);
10812 else
10813 fputs ("(GOT)", asm_out_file);
10815 fputc ('\n', asm_out_file);
10816 return true;
10819 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
10821 int i, units;
10823 if (GET_CODE (x) != CONST_VECTOR)
10824 abort ();
10826 units = CONST_VECTOR_NUNITS (x);
10828 switch (GET_MODE (x))
10830 case V2SImode: size = 4; break;
10831 case V4HImode: size = 2; break;
10832 case V8QImode: size = 1; break;
10833 default:
10834 abort ();
10837 for (i = 0; i < units; i++)
10839 rtx elt;
10841 elt = CONST_VECTOR_ELT (x, i);
10842 assemble_integer
10843 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10846 return true;
10849 return default_assemble_integer (x, size, aligned_p);
10851 #endif
10853 /* A finite state machine takes care of noticing whether or not instructions
10854 can be conditionally executed, and thus decreases execution time and code
10855 size by deleting branch instructions. The fsm is controlled by
10856 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10858 /* The states of the fsm controlling condition codes are:
10859 0: normal, do nothing special
10860 1: make ASM_OUTPUT_OPCODE not output this instruction
10861 2: make ASM_OUTPUT_OPCODE not output this instruction
10862 3: make instructions conditional
10863 4: make instructions conditional
10865 State transitions (state->state by whom under condition):
10866 0 -> 1 final_prescan_insn if the `target' is a label
10867 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10868 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10869 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10870 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10871 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10872 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10873 (the target insn is arm_target_insn).
10875 If the jump clobbers the conditions then we use states 2 and 4.
10877 A similar thing can be done with conditional return insns.
10879 XXX In case the `target' is an unconditional branch, this conditionalising
10880 of the instructions always reduces code size, but not always execution
10881 time. But then, I want to reduce the code size to somewhere near what
10882 /bin/cc produces. */
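/* An illustrative sketch of the transformation: the fsm turns

       cmp   r0, #0
       beq   .L1
       add   r1, r1, #1
   .L1:

   into

       cmp   r0, #0
       addne r1, r1, #1

   by suppressing the branch and conditionalizing the skipped insn.  */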
10884 /* Returns the index of the ARM condition code string in
10885 `arm_condition_codes'. COMPARISON should be an rtx like
10886 `(eq (...) (...))'. */
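/* For example (a sketch): applied to (eq (reg:CC_Z CC_REGNUM) (const_int 0)),
   the CC_Zmode case below yields ARM_EQ, which selects the "eq" suffix
   from arm_condition_codes.  */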
10887 static enum arm_cond_code
10888 get_arm_condition_code (rtx comparison)
10890 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10891 int code;
10892 enum rtx_code comp_code = GET_CODE (comparison);
10894 if (GET_MODE_CLASS (mode) != MODE_CC)
10895 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10896 XEXP (comparison, 1));
10898 switch (mode)
10900 case CC_DNEmode: code = ARM_NE; goto dominance;
10901 case CC_DEQmode: code = ARM_EQ; goto dominance;
10902 case CC_DGEmode: code = ARM_GE; goto dominance;
10903 case CC_DGTmode: code = ARM_GT; goto dominance;
10904 case CC_DLEmode: code = ARM_LE; goto dominance;
10905 case CC_DLTmode: code = ARM_LT; goto dominance;
10906 case CC_DGEUmode: code = ARM_CS; goto dominance;
10907 case CC_DGTUmode: code = ARM_HI; goto dominance;
10908 case CC_DLEUmode: code = ARM_LS; goto dominance;
10909 case CC_DLTUmode: code = ARM_CC;
10911 dominance:
10912 if (comp_code != EQ && comp_code != NE)
10913 abort ();
10915 if (comp_code == EQ)
10916 return ARM_INVERSE_CONDITION_CODE (code);
10917 return code;
10919 case CC_NOOVmode:
10920 switch (comp_code)
10922 case NE: return ARM_NE;
10923 case EQ: return ARM_EQ;
10924 case GE: return ARM_PL;
10925 case LT: return ARM_MI;
10926 default: abort ();
10929 case CC_Zmode:
10930 switch (comp_code)
10932 case NE: return ARM_NE;
10933 case EQ: return ARM_EQ;
10934 default: abort ();
10937 case CC_Nmode:
10938 switch (comp_code)
10940 case NE: return ARM_MI;
10941 case EQ: return ARM_PL;
10942 default: abort ();
10945 case CCFPEmode:
10946 case CCFPmode:
10947 /* These encodings assume that AC=1 in the FPA system control
10948 byte. This allows us to handle all cases except UNEQ and
10949 LTGT. */
10950 switch (comp_code)
10952 case GE: return ARM_GE;
10953 case GT: return ARM_GT;
10954 case LE: return ARM_LS;
10955 case LT: return ARM_MI;
10956 case NE: return ARM_NE;
10957 case EQ: return ARM_EQ;
10958 case ORDERED: return ARM_VC;
10959 case UNORDERED: return ARM_VS;
10960 case UNLT: return ARM_LT;
10961 case UNLE: return ARM_LE;
10962 case UNGT: return ARM_HI;
10963 case UNGE: return ARM_PL;
10964 /* UNEQ and LTGT do not have a representation. */
10965 case UNEQ: /* Fall through. */
10966 case LTGT: /* Fall through. */
10967 default: abort ();
10970 case CC_SWPmode:
10971 switch (comp_code)
10973 case NE: return ARM_NE;
10974 case EQ: return ARM_EQ;
10975 case GE: return ARM_LE;
10976 case GT: return ARM_LT;
10977 case LE: return ARM_GE;
10978 case LT: return ARM_GT;
10979 case GEU: return ARM_LS;
10980 case GTU: return ARM_CC;
10981 case LEU: return ARM_CS;
10982 case LTU: return ARM_HI;
10983 default: abort ();
10986 case CC_Cmode:
10987 switch (comp_code)
10989 case LTU: return ARM_CS;
10990 case GEU: return ARM_CC;
10991 default: abort ();
10994 case CCmode:
10995 switch (comp_code)
10997 case NE: return ARM_NE;
10998 case EQ: return ARM_EQ;
10999 case GE: return ARM_GE;
11000 case GT: return ARM_GT;
11001 case LE: return ARM_LE;
11002 case LT: return ARM_LT;
11003 case GEU: return ARM_CS;
11004 case GTU: return ARM_HI;
11005 case LEU: return ARM_LS;
11006 case LTU: return ARM_CC;
11007 default: abort ();
11010 default: abort ();
11013 abort ();
11016 void
11017 arm_final_prescan_insn (rtx insn)
11019 /* BODY will hold the body of INSN. */
11020 rtx body = PATTERN (insn);
11022 /* This will be 1 if we are trying to repeat the trick, and things need
11023 to be reversed if it appears to fail. */
11024 int reverse = 0;
11026 /* If JUMP_CLOBBERS is one, the condition codes are clobbered if a
11027 branch is taken, even if the rtl suggests otherwise. It also
11028 means that we have to grub around within the jump expression to find
11029 out what the conditions are when the jump isn't taken. */
11030 int jump_clobbers = 0;
11032 /* If we start with a return insn, we only succeed if we find another one. */
11033 int seeking_return = 0;
11035 /* START_INSN will hold the insn from where we start looking. This is the
11036 first insn after the following code_label if REVERSE is true. */
11037 rtx start_insn = insn;
11039 /* If in state 4, check if the target branch is reached, in order to
11040 change back to state 0. */
11041 if (arm_ccfsm_state == 4)
11043 if (insn == arm_target_insn)
11045 arm_target_insn = NULL;
11046 arm_ccfsm_state = 0;
11048 return;
11051 /* If in state 3, it is possible to repeat the trick, if this insn is an
11052 unconditional branch to a label, and immediately following this branch
11053 is the previous target label which is only used once, and the label this
11054 branch jumps to is not too far off. */
11055 if (arm_ccfsm_state == 3)
11057 if (simplejump_p (insn))
11059 start_insn = next_nonnote_insn (start_insn);
11060 if (GET_CODE (start_insn) == BARRIER)
11062 /* XXX Isn't this always a barrier? */
11063 start_insn = next_nonnote_insn (start_insn);
11065 if (GET_CODE (start_insn) == CODE_LABEL
11066 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11067 && LABEL_NUSES (start_insn) == 1)
11068 reverse = TRUE;
11069 else
11070 return;
11072 else if (GET_CODE (body) == RETURN)
11074 start_insn = next_nonnote_insn (start_insn);
11075 if (GET_CODE (start_insn) == BARRIER)
11076 start_insn = next_nonnote_insn (start_insn);
11077 if (GET_CODE (start_insn) == CODE_LABEL
11078 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
11079 && LABEL_NUSES (start_insn) == 1)
11081 reverse = TRUE;
11082 seeking_return = 1;
11084 else
11085 return;
11087 else
11088 return;
11091 if (arm_ccfsm_state != 0 && !reverse)
11092 abort ();
11093 if (GET_CODE (insn) != JUMP_INSN)
11094 return;
11096 /* This jump might be paralleled with a clobber of the condition codes;
11097 the jump should always come first. */
11098 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
11099 body = XVECEXP (body, 0, 0);
11101 if (reverse
11102 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
11103 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
11105 int insns_skipped;
11106 int fail = FALSE, succeed = FALSE;
11107 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11108 int then_not_else = TRUE;
11109 rtx this_insn = start_insn, label = 0;
11111 /* If the jump cannot be done with one instruction, we cannot
11112 conditionally execute the instruction in the inverse case. */
11113 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
11115 jump_clobbers = 1;
11116 return;
11119 /* Register the insn jumped to. */
11120 if (reverse)
11122 if (!seeking_return)
11123 label = XEXP (SET_SRC (body), 0);
11125 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
11126 label = XEXP (XEXP (SET_SRC (body), 1), 0);
11127 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
11129 label = XEXP (XEXP (SET_SRC (body), 2), 0);
11130 then_not_else = FALSE;
11132 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
11133 seeking_return = 1;
11134 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
11136 seeking_return = 1;
11137 then_not_else = FALSE;
11139 else
11140 abort ();
11142 /* See how many insns this branch skips, and what kind of insns. If all
11143 insns are okay, and the label or unconditional branch to the same
11144 label is not too far away, succeed. */
11145 for (insns_skipped = 0;
11146 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11148 rtx scanbody;
11150 this_insn = next_nonnote_insn (this_insn);
11151 if (!this_insn)
11152 break;
11154 switch (GET_CODE (this_insn))
11156 case CODE_LABEL:
11157 /* Succeed if it is the target label, otherwise fail since
11158 control falls in from somewhere else. */
11159 if (this_insn == label)
11161 if (jump_clobbers)
11163 arm_ccfsm_state = 2;
11164 this_insn = next_nonnote_insn (this_insn);
11166 else
11167 arm_ccfsm_state = 1;
11168 succeed = TRUE;
11170 else
11171 fail = TRUE;
11172 break;
11174 case BARRIER:
11175 /* Succeed if the following insn is the target label.
11176 Otherwise fail.
11177 If return insns are used then the last insn in a function
11178 will be a barrier. */
11179 this_insn = next_nonnote_insn (this_insn);
11180 if (this_insn && this_insn == label)
11182 if (jump_clobbers)
11184 arm_ccfsm_state = 2;
11185 this_insn = next_nonnote_insn (this_insn);
11187 else
11188 arm_ccfsm_state = 1;
11189 succeed = TRUE;
11191 else
11192 fail = TRUE;
11193 break;
11195 case CALL_INSN:
11196 /* If using 32-bit addresses the cc is not preserved over
11197 calls. */
11198 if (TARGET_APCS_32)
11200 /* Succeed if the following insn is the target label,
11201 or if the following two insns are a barrier and
11202 the target label. */
11203 this_insn = next_nonnote_insn (this_insn);
11204 if (this_insn && GET_CODE (this_insn) == BARRIER)
11205 this_insn = next_nonnote_insn (this_insn);
11207 if (this_insn && this_insn == label
11208 && insns_skipped < max_insns_skipped)
11210 if (jump_clobbers)
11212 arm_ccfsm_state = 2;
11213 this_insn = next_nonnote_insn (this_insn);
11215 else
11216 arm_ccfsm_state = 1;
11217 succeed = TRUE;
11219 else
11220 fail = TRUE;
11222 break;
11224 case JUMP_INSN:
11225 /* If this is an unconditional branch to the same label, succeed.
11226 If it is to another label, do nothing. If it is conditional,
11227 fail. */
11228 /* XXX Probably, the tests for SET and the PC are
11229 unnecessary. */
11231 scanbody = PATTERN (this_insn);
11232 if (GET_CODE (scanbody) == SET
11233 && GET_CODE (SET_DEST (scanbody)) == PC)
11235 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11236 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11238 arm_ccfsm_state = 2;
11239 succeed = TRUE;
11241 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11242 fail = TRUE;
11244 /* Fail if a conditional return is undesirable (e.g. on a
11245 StrongARM), but still allow this if optimizing for size. */
11246 else if (GET_CODE (scanbody) == RETURN
11247 && !use_return_insn (TRUE, NULL)
11248 && !optimize_size)
11249 fail = TRUE;
11250 else if (GET_CODE (scanbody) == RETURN
11251 && seeking_return)
11253 arm_ccfsm_state = 2;
11254 succeed = TRUE;
11256 else if (GET_CODE (scanbody) == PARALLEL)
11258 switch (get_attr_conds (this_insn))
11260 case CONDS_NOCOND:
11261 break;
11262 default:
11263 fail = TRUE;
11264 break;
11267 else
11268 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11270 break;
11272 case INSN:
11273 /* Instructions using or affecting the condition codes make it
11274 fail. */
11275 scanbody = PATTERN (this_insn);
11276 if (!(GET_CODE (scanbody) == SET
11277 || GET_CODE (scanbody) == PARALLEL)
11278 || get_attr_conds (this_insn) != CONDS_NOCOND)
11279 fail = TRUE;
11281 /* A conditional Cirrus instruction must be followed by
11282 a non-Cirrus instruction. However, since we
11283 conditionalize instructions in this function, and since
11284 by the time we get here we can't add instructions
11285 (nops) because shorten_branches() has already been
11286 called, we disable conditionalizing Cirrus
11287 instructions to be safe. */
11288 if (GET_CODE (scanbody) != USE
11289 && GET_CODE (scanbody) != CLOBBER
11290 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11291 fail = TRUE;
11292 break;
11294 default:
11295 break;
11298 if (succeed)
11300 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11301 arm_target_label = CODE_LABEL_NUMBER (label);
11302 else if (seeking_return || arm_ccfsm_state == 2)
11304 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11306 this_insn = next_nonnote_insn (this_insn);
11307 if (this_insn && (GET_CODE (this_insn) == BARRIER
11308 || GET_CODE (this_insn) == CODE_LABEL))
11309 abort ();
11311 if (!this_insn)
11313 /* Oh, dear! We ran off the end... give up. */
11314 recog (PATTERN (insn), insn, NULL);
11315 arm_ccfsm_state = 0;
11316 arm_target_insn = NULL;
11317 return;
11319 arm_target_insn = this_insn;
11321 else
11322 abort ();
11323 if (jump_clobbers)
11325 if (reverse)
11326 abort ();
11327 arm_current_cc =
11328 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11329 0), 0), 1));
11330 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11331 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11332 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11333 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11335 else
11337 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11338 what it was. */
11339 if (!reverse)
11340 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11341 0));
11344 if (reverse || then_not_else)
11345 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11348 /* Restore recog_data (getting the attributes of other insns can
11349 destroy this array, but final.c assumes that it remains intact
11350 across this call; since the insn has been recognized already we
11351 call recog directly). */
11352 recog (PATTERN (insn), insn, NULL);
11356 /* Returns true if REGNO is a valid register
11357 for holding a quantity of type MODE. */
11358 int
11359 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11361 if (GET_MODE_CLASS (mode) == MODE_CC)
11362 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11364 if (TARGET_THUMB)
11365 /* For the Thumb we only allow values bigger than SImode in
11366 registers 0 - 6, so that there is always a second low
11367 register available to hold the upper part of the value.
11368 We probably ought to ensure that the register is the
11369 start of an even-numbered register pair. */
11370 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11372 if (IS_CIRRUS_REGNUM (regno))
11373 /* We have outlawed SI values in Cirrus registers because they
11374 reside in the lower 32 bits, but SF values reside in the
11375 upper 32 bits. This causes gcc all sorts of grief. We can't
11376 even split the registers into pairs because Cirrus SI values
11377 get sign-extended to 64 bits -- aldyh. */
11378 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11380 if (IS_VFP_REGNUM (regno))
11382 if (mode == SFmode || mode == SImode)
11383 return TRUE;
11385 /* DFmode values are only valid in even register pairs. */
11386 if (mode == DFmode)
11387 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11388 return FALSE;
11391 if (IS_IWMMXT_GR_REGNUM (regno))
11392 return mode == SImode;
11394 if (IS_IWMMXT_REGNUM (regno))
11395 return VALID_IWMMXT_REG_MODE (mode);
11397 if (regno <= LAST_ARM_REGNUM)
11398 /* We allow any value to be stored in the general registers. */
11399 return 1;
11401 if ( regno == FRAME_POINTER_REGNUM
11402 || regno == ARG_POINTER_REGNUM)
11403 /* We only allow integers in the fake hard registers. */
11404 return GET_MODE_CLASS (mode) == MODE_INT;
11406 /* The only registers left are the FPA registers
11407 which we only allow to hold FP values. */
11408 return GET_MODE_CLASS (mode) == MODE_FLOAT
11409 && regno >= FIRST_FPA_REGNUM
11410 && regno <= LAST_FPA_REGNUM;
11413 int
11414 arm_regno_class (int regno)
11416 if (TARGET_THUMB)
11418 if (regno == STACK_POINTER_REGNUM)
11419 return STACK_REG;
11420 if (regno == CC_REGNUM)
11421 return CC_REG;
11422 if (regno < 8)
11423 return LO_REGS;
11424 return HI_REGS;
11427 if ( regno <= LAST_ARM_REGNUM
11428 || regno == FRAME_POINTER_REGNUM
11429 || regno == ARG_POINTER_REGNUM)
11430 return GENERAL_REGS;
11432 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11433 return NO_REGS;
11435 if (IS_CIRRUS_REGNUM (regno))
11436 return CIRRUS_REGS;
11438 if (IS_VFP_REGNUM (regno))
11439 return VFP_REGS;
11441 if (IS_IWMMXT_REGNUM (regno))
11442 return IWMMXT_REGS;
11444 if (IS_IWMMXT_GR_REGNUM (regno))
11445 return IWMMXT_GR_REGS;
11447 return FPA_REGS;
11450 /* Handle a special case when computing the offset
11451 of an argument from the frame pointer. */
11452 int
11453 arm_debugger_arg_offset (int value, rtx addr)
11455 rtx insn;
11457 /* We are only interested if dbxout_parms() failed to compute the offset. */
11458 if (value != 0)
11459 return 0;
11461 /* We can only cope with the case where the address is held in a register. */
11462 if (GET_CODE (addr) != REG)
11463 return 0;
11465 /* If we are using the frame pointer to point at the argument, then
11466 an offset of 0 is correct. */
11467 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11468 return 0;
11470 /* If we are using the stack pointer to point at the
11471 argument, then an offset of 0 is correct. */
11472 if ((TARGET_THUMB || !frame_pointer_needed)
11473 && REGNO (addr) == SP_REGNUM)
11474 return 0;
11476 /* Oh dear. The argument is pointed to by a register rather
11477 than being held in a register, or being stored at a known
11478 offset from the frame pointer. Since GDB only understands
11479 those two kinds of argument we must translate the address
11480 held in the register into an offset from the frame pointer.
11481 We do this by searching through the insns for the function
11482 looking to see where this register gets its value. If the
11483 register is initialized from the frame pointer plus an offset
11484 then we are in luck and we can continue, otherwise we give up.
11486 This code is exercised by producing debugging information
11487 for a function with arguments like this:
11489 double func (double a, double b, int c, double d) {return d;}
11491 Without this code the stab for parameter 'd' will be set to
11492 an offset of 0 from the frame pointer, rather than 8. */
11494 /* The if() statement says:
11496 If the insn is a normal instruction
11497 and if the insn is setting the value in a register
11498 and if the register being set is the register holding the address of the argument
11499 and if the address is computed by an addition
11500 that involves adding to a register
11501 which is the frame pointer
11502 a constant integer
11504 then... */
11506 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11508 if ( GET_CODE (insn) == INSN
11509 && GET_CODE (PATTERN (insn)) == SET
11510 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11511 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11512 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11513 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11514 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11517 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11519 break;
11523 if (value == 0)
11525 debug_rtx (addr);
11526 warning ("unable to compute real location of stacked parameter");
11527 value = 8; /* XXX magic hack */
11530 return value;
11533 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11534 do \
11536 if ((MASK) & insn_flags) \
11537 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
11539 while (0)
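/* A usage sketch (the real registrations appear in
   arm_init_iwmmxt_builtins below):

       def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void,
                     ARM_BUILTIN_WZERO);

   registers __builtin_arm_wzero only when the selected architecture's
   insn_flags include FL_IWMMXT.  */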
11541 struct builtin_description
11543 const unsigned int mask;
11544 const enum insn_code icode;
11545 const char * const name;
11546 const enum arm_builtins code;
11547 const enum rtx_code comparison;
11548 const unsigned int flag;
11551 static const struct builtin_description bdesc_2arg[] =
11553 #define IWMMXT_BUILTIN(code, string, builtin) \
11554 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11555 ARM_BUILTIN_##builtin, 0, 0 },
11557 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11558 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11559 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11560 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11561 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11562 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11563 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11564 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11565 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11566 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11567 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11568 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11569 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11570 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11571 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11572 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11573 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11574 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11575 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11576 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11577 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11578 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11579 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11580 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11581 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11582 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11583 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11584 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11585 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11586 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11587 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11588 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11589 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11590 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11591 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11592 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11593 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11594 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11595 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11596 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11597 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11598 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11599 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11600 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11601 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11602 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11603 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11604 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11605 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11606 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11607 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11608 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11609 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11610 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11611 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11612 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11613 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11614 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11616 #define IWMMXT_BUILTIN2(code, builtin) \
11617 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11619 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11620 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11621 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11622 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11623 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11624 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11625 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11626 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11627 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11628 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11629 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11630 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11631 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11632 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11633 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11634 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11635 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11636 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11637 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11638 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11639 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11640 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11641 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11642 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11643 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11644 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11645 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11646 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11647 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11648 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11649 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11650 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11653 static const struct builtin_description bdesc_1arg[] =
11655 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11656 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11657 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11658 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11659 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11660 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11661 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11662 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11663 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11664 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11665 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11666 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11667 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11668 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11669 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11670 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11671 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11672 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11675 /* Set up all the iWMMXt builtins. This is
11676 not called if TARGET_IWMMXT is zero. */
11678 static void
11679 arm_init_iwmmxt_builtins (void)
11681 const struct builtin_description * d;
11682 size_t i;
11683 tree endlink = void_list_node;
11685 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
11686 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
11687 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
11689 tree int_ftype_int
11690 = build_function_type (integer_type_node,
11691 tree_cons (NULL_TREE, integer_type_node, endlink));
11692 tree v8qi_ftype_v8qi_v8qi_int
11693 = build_function_type (V8QI_type_node,
11694 tree_cons (NULL_TREE, V8QI_type_node,
11695 tree_cons (NULL_TREE, V8QI_type_node,
11696 tree_cons (NULL_TREE,
11697 integer_type_node,
11698 endlink))));
11699 tree v4hi_ftype_v4hi_int
11700 = build_function_type (V4HI_type_node,
11701 tree_cons (NULL_TREE, V4HI_type_node,
11702 tree_cons (NULL_TREE, integer_type_node,
11703 endlink)));
11704 tree v2si_ftype_v2si_int
11705 = build_function_type (V2SI_type_node,
11706 tree_cons (NULL_TREE, V2SI_type_node,
11707 tree_cons (NULL_TREE, integer_type_node,
11708 endlink)));
11709 tree v2si_ftype_di_di
11710 = build_function_type (V2SI_type_node,
11711 tree_cons (NULL_TREE, long_long_integer_type_node,
11712 tree_cons (NULL_TREE, long_long_integer_type_node,
11713 endlink)));
11714 tree di_ftype_di_int
11715 = build_function_type (long_long_integer_type_node,
11716 tree_cons (NULL_TREE, long_long_integer_type_node,
11717 tree_cons (NULL_TREE, integer_type_node,
11718 endlink)));
11719 tree di_ftype_di_int_int
11720 = build_function_type (long_long_integer_type_node,
11721 tree_cons (NULL_TREE, long_long_integer_type_node,
11722 tree_cons (NULL_TREE, integer_type_node,
11723 tree_cons (NULL_TREE,
11724 integer_type_node,
11725 endlink))));
11726 tree int_ftype_v8qi
11727 = build_function_type (integer_type_node,
11728 tree_cons (NULL_TREE, V8QI_type_node,
11729 endlink));
11730 tree int_ftype_v4hi
11731 = build_function_type (integer_type_node,
11732 tree_cons (NULL_TREE, V4HI_type_node,
11733 endlink));
11734 tree int_ftype_v2si
11735 = build_function_type (integer_type_node,
11736 tree_cons (NULL_TREE, V2SI_type_node,
11737 endlink));
11738 tree int_ftype_v8qi_int
11739 = build_function_type (integer_type_node,
11740 tree_cons (NULL_TREE, V8QI_type_node,
11741 tree_cons (NULL_TREE, integer_type_node,
11742 endlink)));
11743 tree int_ftype_v4hi_int
11744 = build_function_type (integer_type_node,
11745 tree_cons (NULL_TREE, V4HI_type_node,
11746 tree_cons (NULL_TREE, integer_type_node,
11747 endlink)));
11748 tree int_ftype_v2si_int
11749 = build_function_type (integer_type_node,
11750 tree_cons (NULL_TREE, V2SI_type_node,
11751 tree_cons (NULL_TREE, integer_type_node,
11752 endlink)));
11753 tree v8qi_ftype_v8qi_int_int
11754 = build_function_type (V8QI_type_node,
11755 tree_cons (NULL_TREE, V8QI_type_node,
11756 tree_cons (NULL_TREE, integer_type_node,
11757 tree_cons (NULL_TREE,
11758 integer_type_node,
11759 endlink))));
11760 tree v4hi_ftype_v4hi_int_int
11761 = build_function_type (V4HI_type_node,
11762 tree_cons (NULL_TREE, V4HI_type_node,
11763 tree_cons (NULL_TREE, integer_type_node,
11764 tree_cons (NULL_TREE,
11765 integer_type_node,
11766 endlink))));
11767 tree v2si_ftype_v2si_int_int
11768 = build_function_type (V2SI_type_node,
11769 tree_cons (NULL_TREE, V2SI_type_node,
11770 tree_cons (NULL_TREE, integer_type_node,
11771 tree_cons (NULL_TREE,
11772 integer_type_node,
11773 endlink))));
11774 /* Miscellaneous. */
11775 tree v8qi_ftype_v4hi_v4hi
11776 = build_function_type (V8QI_type_node,
11777 tree_cons (NULL_TREE, V4HI_type_node,
11778 tree_cons (NULL_TREE, V4HI_type_node,
11779 endlink)));
11780 tree v4hi_ftype_v2si_v2si
11781 = build_function_type (V4HI_type_node,
11782 tree_cons (NULL_TREE, V2SI_type_node,
11783 tree_cons (NULL_TREE, V2SI_type_node,
11784 endlink)));
11785 tree v2si_ftype_v4hi_v4hi
11786 = build_function_type (V2SI_type_node,
11787 tree_cons (NULL_TREE, V4HI_type_node,
11788 tree_cons (NULL_TREE, V4HI_type_node,
11789 endlink)));
11790 tree v2si_ftype_v8qi_v8qi
11791 = build_function_type (V2SI_type_node,
11792 tree_cons (NULL_TREE, V8QI_type_node,
11793 tree_cons (NULL_TREE, V8QI_type_node,
11794 endlink)));
11795 tree v4hi_ftype_v4hi_di
11796 = build_function_type (V4HI_type_node,
11797 tree_cons (NULL_TREE, V4HI_type_node,
11798 tree_cons (NULL_TREE,
11799 long_long_integer_type_node,
11800 endlink)));
11801 tree v2si_ftype_v2si_di
11802 = build_function_type (V2SI_type_node,
11803 tree_cons (NULL_TREE, V2SI_type_node,
11804 tree_cons (NULL_TREE,
11805 long_long_integer_type_node,
11806 endlink)));
11807 tree void_ftype_int_int
11808 = build_function_type (void_type_node,
11809 tree_cons (NULL_TREE, integer_type_node,
11810 tree_cons (NULL_TREE, integer_type_node,
11811 endlink)));
11812 tree di_ftype_void
11813 = build_function_type (long_long_unsigned_type_node, endlink);
11814 tree di_ftype_v8qi
11815 = build_function_type (long_long_integer_type_node,
11816 tree_cons (NULL_TREE, V8QI_type_node,
11817 endlink));
11818 tree di_ftype_v4hi
11819 = build_function_type (long_long_integer_type_node,
11820 tree_cons (NULL_TREE, V4HI_type_node,
11821 endlink));
11822 tree di_ftype_v2si
11823 = build_function_type (long_long_integer_type_node,
11824 tree_cons (NULL_TREE, V2SI_type_node,
11825 endlink));
11826 tree v2si_ftype_v4hi
11827 = build_function_type (V2SI_type_node,
11828 tree_cons (NULL_TREE, V4HI_type_node,
11829 endlink));
11830 tree v4hi_ftype_v8qi
11831 = build_function_type (V4HI_type_node,
11832 tree_cons (NULL_TREE, V8QI_type_node,
11833 endlink));
11835 tree di_ftype_di_v4hi_v4hi
11836 = build_function_type (long_long_unsigned_type_node,
11837 tree_cons (NULL_TREE,
11838 long_long_unsigned_type_node,
11839 tree_cons (NULL_TREE, V4HI_type_node,
11840 tree_cons (NULL_TREE,
11841 V4HI_type_node,
11842 endlink))));
11844 tree di_ftype_v4hi_v4hi
11845 = build_function_type (long_long_unsigned_type_node,
11846 tree_cons (NULL_TREE, V4HI_type_node,
11847 tree_cons (NULL_TREE, V4HI_type_node,
11848 endlink)));
11850 /* Normal vector binops. */
11851 tree v8qi_ftype_v8qi_v8qi
11852 = build_function_type (V8QI_type_node,
11853 tree_cons (NULL_TREE, V8QI_type_node,
11854 tree_cons (NULL_TREE, V8QI_type_node,
11855 endlink)));
11856 tree v4hi_ftype_v4hi_v4hi
11857 = build_function_type (V4HI_type_node,
11858 tree_cons (NULL_TREE, V4HI_type_node,
11859 tree_cons (NULL_TREE, V4HI_type_node,
11860 endlink)));
11861 tree v2si_ftype_v2si_v2si
11862 = build_function_type (V2SI_type_node,
11863 tree_cons (NULL_TREE, V2SI_type_node,
11864 tree_cons (NULL_TREE, V2SI_type_node,
11865 endlink)));
11866 tree di_ftype_di_di
11867 = build_function_type (long_long_unsigned_type_node,
11868 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11869 tree_cons (NULL_TREE,
11870 long_long_unsigned_type_node,
11871 endlink)));
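/* To illustrate the naming scheme used above (a sketch, not one of the
   registered types): di_ftype_di_di describes a function shaped like

       unsigned long long f (unsigned long long, unsigned long long);

   i.e. the return type comes first, followed by the argument types.  */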
11873 /* Add all builtins that are more or less simple operations on two
11874 operands. */
11875 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11877 /* Use one of the operands; the target can have a different mode for
11878 mask-generating compares. */
11879 enum machine_mode mode;
11880 tree type;
11882 if (d->name == 0)
11883 continue;
11885 mode = insn_data[d->icode].operand[1].mode;
11887 switch (mode)
11889 case V8QImode:
11890 type = v8qi_ftype_v8qi_v8qi;
11891 break;
11892 case V4HImode:
11893 type = v4hi_ftype_v4hi_v4hi;
11894 break;
11895 case V2SImode:
11896 type = v2si_ftype_v2si_v2si;
11897 break;
11898 case DImode:
11899 type = di_ftype_di_di;
11900 break;
11902 default:
11903 abort ();
11906 def_mbuiltin (d->mask, d->name, type, d->code);
11909 /* Add the remaining MMX insns with somewhat more complicated types. */
11910 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11911 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11912 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11914 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11915 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11916 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11917 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11918 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11919 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11921 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11922 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11923 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11924 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11925 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11926 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11928 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11929 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11930 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11931 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11932 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11933 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11935 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11936 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11937 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11938 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11939 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11940 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11942 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11944 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11945 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11946 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11947 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11949 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11950 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11951 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11952 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11953 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11954 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11955 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11956 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11957 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11959 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11960 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11961 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11963 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11964 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11965 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11967 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11968 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11969 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11970 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11971 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11972 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11974 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11975 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11976 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11977 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11978 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11979 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11980 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11981 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11982 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11983 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11984 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11985 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11987 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11988 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11989 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11990 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11992 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11993 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11994 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11995 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11996 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11997 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11998 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
12001 static void
12002 arm_init_builtins (void)
12004 if (TARGET_REALLY_IWMMXT)
12005 arm_init_iwmmxt_builtins ();
12008 /* Errors in the source file can cause expand_expr to return const0_rtx
12009 where we expect a vector. To avoid crashing, use one of the vector
12010 clear instructions. */
12012 static rtx
12013 safe_vector_operand (rtx x, enum machine_mode mode)
12015 if (x != const0_rtx)
12016 return x;
12017 x = gen_reg_rtx (mode);
12019 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
12020 : gen_rtx_SUBREG (DImode, x, 0)));
12021 return x;
12024 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12026 static rtx
12027 arm_expand_binop_builtin (enum insn_code icode,
12028 tree arglist, rtx target)
12030 rtx pat;
12031 tree arg0 = TREE_VALUE (arglist);
12032 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12033 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12034 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12035 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12036 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12037 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12039 if (VECTOR_MODE_P (mode0))
12040 op0 = safe_vector_operand (op0, mode0);
12041 if (VECTOR_MODE_P (mode1))
12042 op1 = safe_vector_operand (op1, mode1);
12044 if (! target
12045 || GET_MODE (target) != tmode
12046 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12047 target = gen_reg_rtx (tmode);
12049 /* If the insn wants input operands in modes different from
12050 the result, abort. */
12051 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
12052 abort ();
12054 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12055 op0 = copy_to_mode_reg (mode0, op0);
12056 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12057 op1 = copy_to_mode_reg (mode1, op1);
12059 pat = GEN_FCN (icode) (target, op0, op1);
12060 if (! pat)
12061 return 0;
12062 emit_insn (pat);
12063 return target;
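/* As a concrete trace of the helper above: expanding
   __builtin_arm_wsadb (a, b) arrives here with
   icode == CODE_FOR_iwmmxt_wsadb; A and B are expanded to rtx, copied
   into V8QI registers if the insn predicates reject them, a fresh V2SI
   target is allocated if necessary, and the single wsadb insn is
   emitted.  */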
12066 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12068 static rtx
12069 arm_expand_unop_builtin (enum insn_code icode,
12070 tree arglist, rtx target, int do_load)
12072 rtx pat;
12073 tree arg0 = TREE_VALUE (arglist);
12074 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12075 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12076 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12078 if (! target
12079 || GET_MODE (target) != tmode
12080 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12081 target = gen_reg_rtx (tmode);
12082 if (do_load)
12083 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12084 else
12086 if (VECTOR_MODE_P (mode0))
12087 op0 = safe_vector_operand (op0, mode0);
12089 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12090 op0 = copy_to_mode_reg (mode0, op0);
12093 pat = GEN_FCN (icode) (target, op0);
12094 if (! pat)
12095 return 0;
12096 emit_insn (pat);
12097 return target;
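/* The DO_LOAD path above serves builtins whose argument is a pointer:
   the expanded address is forced into a register and wrapped in a MEM
   of MODE0, so the generated insn reads its operand straight from
   memory rather than from a register.  */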
12100 /* Expand an expression EXP that calls a built-in function,
12101 with result going to TARGET if that's convenient
12102 (and in mode MODE if that's convenient).
12103 SUBTARGET may be used as the target for computing one of EXP's operands.
12104 IGNORE is nonzero if the value is to be ignored. */
12106 static rtx
12107 arm_expand_builtin (tree exp,
12108 rtx target,
12109 rtx subtarget ATTRIBUTE_UNUSED,
12110 enum machine_mode mode ATTRIBUTE_UNUSED,
12111 int ignore ATTRIBUTE_UNUSED)
12113 const struct builtin_description * d;
12114 enum insn_code icode;
12115 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
12116 tree arglist = TREE_OPERAND (exp, 1);
12117 tree arg0;
12118 tree arg1;
12119 tree arg2;
12120 rtx op0;
12121 rtx op1;
12122 rtx op2;
12123 rtx pat;
12124 int fcode = DECL_FUNCTION_CODE (fndecl);
12125 size_t i;
12126 enum machine_mode tmode;
12127 enum machine_mode mode0;
12128 enum machine_mode mode1;
12129 enum machine_mode mode2;
12131 switch (fcode)
12133 case ARM_BUILTIN_TEXTRMSB:
12134 case ARM_BUILTIN_TEXTRMUB:
12135 case ARM_BUILTIN_TEXTRMSH:
12136 case ARM_BUILTIN_TEXTRMUH:
12137 case ARM_BUILTIN_TEXTRMSW:
12138 case ARM_BUILTIN_TEXTRMUW:
12139 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
12140 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
12141 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
12142 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
12143 : CODE_FOR_iwmmxt_textrmw);
12145 arg0 = TREE_VALUE (arglist);
12146 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12147 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12148 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12149 tmode = insn_data[icode].operand[0].mode;
12150 mode0 = insn_data[icode].operand[1].mode;
12151 mode1 = insn_data[icode].operand[2].mode;
12153 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12154 op0 = copy_to_mode_reg (mode0, op0);
12155 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12157 /* @@@ better error message */
12158 error ("selector must be an immediate");
12159 return gen_reg_rtx (tmode);
12161 if (target == 0
12162 || GET_MODE (target) != tmode
12163 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12164 target = gen_reg_rtx (tmode);
12165 pat = GEN_FCN (icode) (target, op0, op1);
12166 if (! pat)
12167 return 0;
12168 emit_insn (pat);
12169 return target;
12171 case ARM_BUILTIN_TINSRB:
12172 case ARM_BUILTIN_TINSRH:
12173 case ARM_BUILTIN_TINSRW:
12174 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12175 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12176 : CODE_FOR_iwmmxt_tinsrw);
12177 arg0 = TREE_VALUE (arglist);
12178 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12179 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12180 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12181 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12182 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12183 tmode = insn_data[icode].operand[0].mode;
12184 mode0 = insn_data[icode].operand[1].mode;
12185 mode1 = insn_data[icode].operand[2].mode;
12186 mode2 = insn_data[icode].operand[3].mode;
12188 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12189 op0 = copy_to_mode_reg (mode0, op0);
12190 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12191 op1 = copy_to_mode_reg (mode1, op1);
12192 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12194 /* @@@ better error message */
12195 error ("selector must be an immediate");
12196 return const0_rtx;
12198 if (target == 0
12199 || GET_MODE (target) != tmode
12200 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12201 target = gen_reg_rtx (tmode);
12202 pat = GEN_FCN (icode) (target, op0, op1, op2);
12203 if (! pat)
12204 return 0;
12205 emit_insn (pat);
12206 return target;
12208 case ARM_BUILTIN_SETWCX:
12209 arg0 = TREE_VALUE (arglist);
12210 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12211 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12212 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12213 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12214 return 0;
12216 case ARM_BUILTIN_GETWCX:
12217 arg0 = TREE_VALUE (arglist);
12218 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12219 target = gen_reg_rtx (SImode);
12220 emit_insn (gen_iwmmxt_tmrc (target, op0));
12221 return target;
12223 case ARM_BUILTIN_WSHUFH:
12224 icode = CODE_FOR_iwmmxt_wshufh;
12225 arg0 = TREE_VALUE (arglist);
12226 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12227 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12228 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12229 tmode = insn_data[icode].operand[0].mode;
12230 mode1 = insn_data[icode].operand[1].mode;
12231 mode2 = insn_data[icode].operand[2].mode;
12233 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12234 op0 = copy_to_mode_reg (mode1, op0);
12235 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12237 /* @@@ better error message */
12238 error ("mask must be an immediate");
12239 return const0_rtx;
12241 if (target == 0
12242 || GET_MODE (target) != tmode
12243 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12244 target = gen_reg_rtx (tmode);
12245 pat = GEN_FCN (icode) (target, op0, op1);
12246 if (! pat)
12247 return 0;
12248 emit_insn (pat);
12249 return target;
12251 case ARM_BUILTIN_WSADB:
12252 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12253 case ARM_BUILTIN_WSADH:
12254 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12255 case ARM_BUILTIN_WSADBZ:
12256 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12257 case ARM_BUILTIN_WSADHZ:
12258 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12260 /* Several three-argument builtins. */
12261 case ARM_BUILTIN_WMACS:
12262 case ARM_BUILTIN_WMACU:
12263 case ARM_BUILTIN_WALIGN:
12264 case ARM_BUILTIN_TMIA:
12265 case ARM_BUILTIN_TMIAPH:
12266 case ARM_BUILTIN_TMIATT:
12267 case ARM_BUILTIN_TMIATB:
12268 case ARM_BUILTIN_TMIABT:
12269 case ARM_BUILTIN_TMIABB:
12270 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12271 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12272 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12273 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12274 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12275 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12276 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12277 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12278 : CODE_FOR_iwmmxt_walign);
12279 arg0 = TREE_VALUE (arglist);
12280 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12281 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12282 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12283 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12284 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12285 tmode = insn_data[icode].operand[0].mode;
12286 mode0 = insn_data[icode].operand[1].mode;
12287 mode1 = insn_data[icode].operand[2].mode;
12288 mode2 = insn_data[icode].operand[3].mode;
12290 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12291 op0 = copy_to_mode_reg (mode0, op0);
12292 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12293 op1 = copy_to_mode_reg (mode1, op1);
12294 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12295 op2 = copy_to_mode_reg (mode2, op2);
12296 if (target == 0
12297 || GET_MODE (target) != tmode
12298 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12299 target = gen_reg_rtx (tmode);
12300 pat = GEN_FCN (icode) (target, op0, op1, op2);
12301 if (! pat)
12302 return 0;
12303 emit_insn (pat);
12304 return target;
12306 case ARM_BUILTIN_WZERO:
12307 target = gen_reg_rtx (DImode);
12308 emit_insn (gen_iwmmxt_clrdi (target));
12309 return target;
12311 default:
12312 break;
12315 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12316 if (d->code == (const enum arm_builtins) fcode)
12317 return arm_expand_binop_builtin (d->icode, arglist, target);
12319 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12320 if (d->code == (const enum arm_builtins) fcode)
12321 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12323 /* @@@ Should really do something sensible here. */
12324 return NULL_RTX;
12327 /* Recursively search through all of the blocks in a function
12328 checking to see if any of the variables created in that
12329 function match the RTX called 'orig'. If they do then
12330 replace them with the RTX called 'new'. */
12331 static void
12332 replace_symbols_in_block (tree block, rtx orig, rtx new)
12334 for (; block; block = BLOCK_CHAIN (block))
12336 tree sym;
12338 if (!TREE_USED (block))
12339 continue;
12341 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12343 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12344 || DECL_IGNORED_P (sym)
12345 || TREE_CODE (sym) != VAR_DECL
12346 || DECL_EXTERNAL (sym)
12347 || !rtx_equal_p (DECL_RTL (sym), orig)
12349 continue;
12351 SET_DECL_RTL (sym, new);
12354 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12358 /* Return the number (counting from 0) of
12359 the least significant set bit in MASK. */
12361 inline static int
12362 number_of_first_bit_set (int mask)
12364 int bit;
12366 for (bit = 0;
12367 (mask & (1 << bit)) == 0;
12368 ++bit)
12369 continue;
12371 return bit;
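/* For example, number_of_first_bit_set (0x18) returns 3, since bit 3
   is the lowest bit set in binary 11000.  The loop does not terminate
   for MASK == 0, so callers must guarantee a nonzero mask.  */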
12374 /* Generate code to return from a thumb function.
12375 If 'reg_containing_return_addr' is -1, then the return address is
12376 actually on the stack, at the stack pointer. */
12377 static void
12378 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
12380 unsigned regs_available_for_popping;
12381 unsigned regs_to_pop;
12382 int pops_needed;
12383 unsigned available;
12384 unsigned required;
12385 int mode;
12386 int size;
12387 int restore_a4 = FALSE;
12389 /* Compute the registers we need to pop. */
12390 regs_to_pop = 0;
12391 pops_needed = 0;
12393 /* There is an assumption here that, if eh_ofs is not NULL, the
12394 normal return address will have been pushed. */
12395 if (reg_containing_return_addr == -1 || eh_ofs)
12397 /* When we are generating a return for __builtin_eh_return,
12398 reg_containing_return_addr must specify the return regno. */
12399 if (eh_ofs && reg_containing_return_addr == -1)
12400 abort ();
12402 regs_to_pop |= 1 << LR_REGNUM;
12403 ++pops_needed;
12406 if (TARGET_BACKTRACE)
12408 /* Restore the (ARM) frame pointer and stack pointer. */
12409 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12410 pops_needed += 2;
12413 /* If there is nothing to pop then just emit the BX instruction and
12414 return. */
12415 if (pops_needed == 0)
12417 if (eh_ofs)
12418 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12420 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12421 return;
12423 /* Otherwise if we are not supporting interworking and we have not created
12424 a backtrace structure and the function was not entered in ARM mode then
12425 just pop the return address straight into the PC. */
12426 else if (!TARGET_INTERWORK
12427 && !TARGET_BACKTRACE
12428 && !is_called_in_ARM_mode (current_function_decl))
12430 if (eh_ofs)
12432 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
12433 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12434 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12436 else
12437 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12439 return;
12442 /* Find out how many of the (return) argument registers we can corrupt. */
12443 regs_available_for_popping = 0;
12445 /* If returning via __builtin_eh_return, the bottom three registers
12446 all contain information needed for the return. */
12447 if (eh_ofs)
12448 size = 12;
12449 else
12451 #ifdef RTX_CODE
12452 /* If we can, deduce the registers used from the function's
12453 return value. This is more reliable than examining
12454 regs_ever_live[] because that will be set if the register is
12455 ever used in the function, not just if the register is used
12456 to hold a return value. */
12458 if (current_function_return_rtx != 0)
12459 mode = GET_MODE (current_function_return_rtx);
12460 else
12461 #endif
12462 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12464 size = GET_MODE_SIZE (mode);
12466 if (size == 0)
12468 /* In a void function we can use any argument register.
12469 In a function that returns a structure on the stack
12470 we can use the second and third argument registers. */
12471 if (mode == VOIDmode)
12472 regs_available_for_popping =
12473 (1 << ARG_REGISTER (1))
12474 | (1 << ARG_REGISTER (2))
12475 | (1 << ARG_REGISTER (3));
12476 else
12477 regs_available_for_popping =
12478 (1 << ARG_REGISTER (2))
12479 | (1 << ARG_REGISTER (3));
12481 else if (size <= 4)
12482 regs_available_for_popping =
12483 (1 << ARG_REGISTER (2))
12484 | (1 << ARG_REGISTER (3));
12485 else if (size <= 8)
12486 regs_available_for_popping =
12487 (1 << ARG_REGISTER (3));
12490 /* Match registers to be popped with registers into which we pop them. */
12491 for (available = regs_available_for_popping,
12492 required = regs_to_pop;
12493 required != 0 && available != 0;
12494 available &= ~(available & - available),
12495 required &= ~(required & - required))
12496 -- pops_needed;
12498 /* If we have any popping registers left over, remove them. */
12499 if (available > 0)
12500 regs_available_for_popping &= ~available;
12502 /* Otherwise if we need another popping register we can use
12503 the fourth argument register. */
12504 else if (pops_needed)
12506 /* If we have not found any free argument registers and
12507 reg a4 contains the return address, we must move it. */
12508 if (regs_available_for_popping == 0
12509 && reg_containing_return_addr == LAST_ARG_REGNUM)
12511 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12512 reg_containing_return_addr = LR_REGNUM;
12514 else if (size > 12)
12516 /* Register a4 is being used to hold part of the return value,
12517 but we have dire need of a free, low register. */
12518 restore_a4 = TRUE;
12520 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
12523 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12525 /* The fourth argument register is available. */
12526 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12528 --pops_needed;
12532 /* Pop as many registers as we can. */
12533 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12534 regs_available_for_popping);
12536 /* Process the registers we popped. */
12537 if (reg_containing_return_addr == -1)
12539 /* The return address was popped into the lowest numbered register. */
12540 regs_to_pop &= ~(1 << LR_REGNUM);
12542 reg_containing_return_addr =
12543 number_of_first_bit_set (regs_available_for_popping);
12545 /* Remove this register from the mask of available registers, so that
12546 the return address will not be corrupted by further pops. */
12547 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12550 /* If we popped other registers then handle them here. */
12551 if (regs_available_for_popping)
12553 int frame_pointer;
12555 /* Work out which register currently contains the frame pointer. */
12556 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12558 /* Move it into the correct place. */
12559 asm_fprintf (f, "\tmov\t%r, %r\n",
12560 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12562 /* (Temporarily) remove it from the mask of popped registers. */
12563 regs_available_for_popping &= ~(1 << frame_pointer);
12564 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12566 if (regs_available_for_popping)
12568 int stack_pointer;
12570 /* We popped the stack pointer as well,
12571 find the register that contains it. */
12572 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12574 /* Move it into the stack register. */
12575 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12577 /* At this point we have popped all necessary registers, so
12578 do not worry about restoring regs_available_for_popping
12579 to its correct value:
12581 assert (pops_needed == 0)
12582 assert (regs_available_for_popping == (1 << frame_pointer))
12583 assert (regs_to_pop == (1 << STACK_POINTER)) */
12585 else
12587 /* Since we have just moved the popped value into the frame
12588 pointer, the popping register is available for reuse, and
12589 we know that we still have the stack pointer left to pop. */
12590 regs_available_for_popping |= (1 << frame_pointer);
12594 /* If we still have registers left on the stack, but we no longer have
12595 any registers into which we can pop them, then we must move the return
12596 address into the link register and make available the register that
12597 contained it. */
12598 if (regs_available_for_popping == 0 && pops_needed > 0)
12600 regs_available_for_popping |= 1 << reg_containing_return_addr;
12602 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12603 reg_containing_return_addr);
12605 reg_containing_return_addr = LR_REGNUM;
12608 /* If we have registers left on the stack then pop some more.
12609 We know that at most we will want to pop FP and SP. */
12610 if (pops_needed > 0)
12612 int popped_into;
12613 int move_to;
12615 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12616 regs_available_for_popping);
12618 /* We have popped either FP or SP.
12619 Move whichever one it is into the correct register. */
12620 popped_into = number_of_first_bit_set (regs_available_for_popping);
12621 move_to = number_of_first_bit_set (regs_to_pop);
12623 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12625 regs_to_pop &= ~(1 << move_to);
12627 --pops_needed;
12630 /* If we still have not popped everything then we must have only
12631 had one register available to us and we are now popping the SP. */
12632 if (pops_needed > 0)
12634 int popped_into;
12636 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12637 regs_available_for_popping);
12639 popped_into = number_of_first_bit_set (regs_available_for_popping);
12641 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12643 /* assert (regs_to_pop == (1 << STACK_POINTER))
12644 assert (pops_needed == 1) */
12648 /* If necessary restore the a4 register. */
12649 if (restore_a4)
12651 if (reg_containing_return_addr != LR_REGNUM)
12653 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12654 reg_containing_return_addr = LR_REGNUM;
12657 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12660 if (eh_ofs)
12661 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12663 /* Return to caller. */
12664 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
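/* A minimal example of the code above: for a void, interworked leaf
   function whose return address was pushed, the emitted epilogue is
   simply

       pop {r0}
       bx  r0

   since the return address is popped into the lowest free argument
   register and then used for the interworking return.  */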
12667 /* Emit code to push or pop registers to or from the stack. F is the
12668 assembly file. MASK is the registers to push or pop. PUSH is
12669 nonzero if we should push, and zero if we should pop. For debugging
12670 output, if pushing, adjust CFA_OFFSET by the amount of space added
12671 to the stack. REAL_REGS should have the same number of bits set as
12672 MASK, and will be used instead (in the same order) to describe which
12673 registers were saved - this is used to mark the save slots when we
12674 push high registers after moving them to low registers. */
12675 static void
12676 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12678 int regno;
12679 int lo_mask = mask & 0xFF;
12680 int pushed_words = 0;
12682 if (lo_mask == 0 && !push && (mask & (1 << 15)))
12684 /* Special case. Do not generate a POP PC statement here, do it in
12685 thumb_exit(). */
12686 thumb_exit (f, -1, NULL_RTX);
12687 return;
12690 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12692 /* Look at the low registers first. */
12693 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12695 if (lo_mask & 1)
12697 asm_fprintf (f, "%r", regno);
12699 if ((lo_mask & ~1) != 0)
12700 fprintf (f, ", ");
12702 pushed_words++;
12706 if (push && (mask & (1 << LR_REGNUM)))
12708 /* Catch pushing the LR. */
12709 if (mask & 0xFF)
12710 fprintf (f, ", ");
12712 asm_fprintf (f, "%r", LR_REGNUM);
12714 pushed_words++;
12716 else if (!push && (mask & (1 << PC_REGNUM)))
12718 /* Catch popping the PC. */
12719 if (TARGET_INTERWORK || TARGET_BACKTRACE)
12721 /* The PC is never popped directly; instead
12722 it is popped into r3 and then BX is used. */
12723 fprintf (f, "}\n");
12725 thumb_exit (f, -1, NULL_RTX);
12727 return;
12729 else
12731 if (mask & 0xFF)
12732 fprintf (f, ", ");
12734 asm_fprintf (f, "%r", PC_REGNUM);
12738 fprintf (f, "}\n");
12740 if (push && pushed_words && dwarf2out_do_frame ())
12742 char *l = dwarf2out_cfi_label ();
12743 int pushed_mask = real_regs;
12745 *cfa_offset += pushed_words * 4;
12746 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12748 pushed_words = 0;
12749 pushed_mask = real_regs;
12750 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12752 if (pushed_mask & 1)
12753 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
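/* For example, thumb_pushpop (f, 0x40f0, 1, &cfa_offset, 0x40f0),
   i.e. a push of r4-r7 and lr, emits "push {r4, r5, r6, r7, lr}",
   and when frame-debug output is enabled it advances the CFA offset
   by 20 bytes and records one save slot per pushed register.  */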
12758 void
12759 thumb_final_prescan_insn (rtx insn)
12761 if (flag_print_asm_name)
12762 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12763 INSN_ADDRESSES (INSN_UID (insn)));
12767 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12769 unsigned HOST_WIDE_INT mask = 0xff;
12770 int i;
12772 if (val == 0) /* XXX */
12773 return 0;
12775 for (i = 0; i < 25; i++)
12776 if ((val & (mask << i)) == val)
12777 return 1;
12779 return 0;
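/* E.g. 0x4400 (0x44 << 8) is shiftable, while 0x401 is not: the value
   must be an 8-bit constant shifted left by anywhere from 0 to 24
   bits, the shape a Thumb mov-then-lsl sequence can synthesize.  */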
12782 /* Returns nonzero if the current function contains,
12783 or might contain, a far jump. */
12784 static int
12785 thumb_far_jump_used_p (void)
12787 rtx insn;
12789 /* This test is only important for leaf functions. */
12790 /* assert (!leaf_function_p ()); */
12792 /* If we have already decided that far jumps may be used,
12793 do not bother checking again, and always return true even if
12794 it turns out that they are not being used. Once we have made
12795 the decision that far jumps are present (and that hence the link
12796 register will be pushed onto the stack) we cannot go back on it. */
12797 if (cfun->machine->far_jump_used)
12798 return 1;
12800 /* If this function is not being called from the prologue/epilogue
12801 generation code then it must be being called from the
12802 INITIAL_ELIMINATION_OFFSET macro. */
12803 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
12805 /* In this case we know that we are being asked about the elimination
12806 of the arg pointer register. If that register is not being used,
12807 then there are no arguments on the stack, and we do not have to
12808 worry that a far jump might force the prologue to push the link
12809 register, changing the stack offsets. In this case we can just
12810 return false, since the presence of far jumps in the function will
12811 not affect stack offsets.
12813 If the arg pointer is live (or if it was live, but has now been
12814 eliminated and so set to dead) then we do have to test to see if
12815 the function might contain a far jump. This test can lead to some
12816 false positives, since before reload is completed, the length of
12817 branch instructions is not known, so gcc defaults to returning their
12818 longest length, which in turn sets the far jump attribute to true.
12820 A false positive will not result in bad code being generated, but it
12821 will result in a needless push and pop of the link register. We
12822 hope that this does not occur too often.
12824 If we need doubleword stack alignment this could affect the other
12825 elimination offsets so we can't risk getting it wrong. */
12826 if (regs_ever_live [ARG_POINTER_REGNUM])
12827 cfun->machine->arg_pointer_live = 1;
12828 else if (!cfun->machine->arg_pointer_live)
12829 return 0;
12832 /* Check to see if the function contains a branch
12833 insn with the far jump attribute set. */
12834 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12836 if (GET_CODE (insn) == JUMP_INSN
12837 /* Ignore tablejump patterns. */
12838 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12839 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12840 && get_attr_far_jump (insn) == FAR_JUMP_YES
12843 /* Record the fact that we have decided that
12844 the function does use far jumps. */
12845 cfun->machine->far_jump_used = 1;
12846 return 1;
12850 return 0;
12853 /* Return nonzero if FUNC must be entered in ARM mode. */
12855 is_called_in_ARM_mode (tree func)
12857 if (TREE_CODE (func) != FUNCTION_DECL)
12858 abort ();
12860 /* Ignore the problem of functions whose address is taken. */
12861 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12862 return TRUE;
12864 #ifdef ARM_PE
12865 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12866 #else
12867 return FALSE;
12868 #endif
12871 /* The bits which aren't usefully expanded as rtl. */
12872 const char *
12873 thumb_unexpanded_epilogue (void)
12875 int regno;
12876 int live_regs_mask = 0;
12877 int high_regs_pushed = 0;
12878 int leaf_function = leaf_function_p ();
12879 int had_to_push_lr;
12880 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
12882 if (return_used_this_function)
12883 return "";
12885 if (IS_NAKED (arm_current_func_type ()))
12886 return "";
12888 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12889 if (THUMB_REG_PUSHED_P (regno))
12890 live_regs_mask |= 1 << regno;
12892 for (regno = 8; regno < 13; regno++)
12893 if (THUMB_REG_PUSHED_P (regno))
12894 high_regs_pushed++;
12896 /* The prologue may have pushed some high registers to use as
12897 work registers. E.g. the testsuite file:
12898 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12899 compiles to produce:
12900 push {r4, r5, r6, r7, lr}
12901 mov r7, r9
12902 mov r6, r8
12903 push {r6, r7}
12904 as part of the prologue. We have to undo that pushing here. */
12906 if (high_regs_pushed)
12908 int mask = live_regs_mask;
12909 int next_hi_reg;
12910 int size;
12911 int mode;
12913 #ifdef RTX_CODE
12914 /* If we can, deduce the registers used from the function's return value.
12915 This is more reliable than examining regs_ever_live[] because that
12916 will be set if the register is ever used in the function, not just if
12917 the register is used to hold a return value. */
12919 if (current_function_return_rtx != 0)
12920 mode = GET_MODE (current_function_return_rtx);
12921 else
12922 #endif
12923 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12925 size = GET_MODE_SIZE (mode);
12927 /* Unless we are returning a type of size > 12, register r3 is
12928 available. */
12929 if (size < 13)
12930 mask |= 1 << 3;
12932 if (mask == 0)
12933 /* Oh dear! We have no low registers into which we can pop
12934 high registers! */
12935 internal_error
12936 ("no low registers available for popping high registers");
12938 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12939 if (THUMB_REG_PUSHED_P (next_hi_reg))
12940 break;
12942 while (high_regs_pushed)
12944 /* Find lo register(s) into which the high register(s) can
12945 be popped. */
12946 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12948 if (mask & (1 << regno))
12949 high_regs_pushed--;
12950 if (high_regs_pushed == 0)
12951 break;
12954 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12956 /* Pop the values into the low register(s). */
12957 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12959 /* Move the value(s) into the high registers. */
12960 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12962 if (mask & (1 << regno))
12964 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12965 regno);
12967 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12968 if (THUMB_REG_PUSHED_P (next_hi_reg))
12969 break;
12975 had_to_push_lr = (live_regs_mask || !leaf_function
12976 || thumb_far_jump_used_p ());
12978 if (TARGET_BACKTRACE
12979 && ((live_regs_mask & 0xFF) == 0)
12980 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12982 /* The stack backtrace structure creation code had to
12983 push R7 in order to get a work register, so we pop
12984 it now. */
12985 live_regs_mask |= (1 << LAST_LO_REGNUM);
12988 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12990 if (had_to_push_lr
12991 && !is_called_in_ARM_mode (current_function_decl)
12992 && !eh_ofs)
12993 live_regs_mask |= 1 << PC_REGNUM;
12995 /* Either no argument registers were pushed or a backtrace
12996 structure was created which includes an adjusted stack
12997 pointer, so just pop everything. */
12998 if (live_regs_mask)
12999 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13000 live_regs_mask);
13002 if (eh_ofs)
13003 thumb_exit (asm_out_file, 2, eh_ofs);
13004 /* We have either just popped the return address into the
13005 PC, or it was kept in LR for the entire function, or
13006 it is still on the stack because we do not want to
13007 return by doing a pop {pc}. */
13008 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
13009 thumb_exit (asm_out_file,
13010 (had_to_push_lr
13011 && is_called_in_ARM_mode (current_function_decl)) ?
13012 -1 : LR_REGNUM, NULL_RTX);
13014 else
13016 /* Pop everything but the return address. */
13017 live_regs_mask &= ~(1 << PC_REGNUM);
13019 if (live_regs_mask)
13020 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
13021 live_regs_mask);
13023 if (had_to_push_lr)
13024 /* Get the return address into a temporary register. */
13025 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
13026 1 << LAST_ARG_REGNUM);
13028 /* Remove the argument registers that were pushed onto the stack. */
13029 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
13030 SP_REGNUM, SP_REGNUM,
13031 current_function_pretend_args_size);
13033 if (eh_ofs)
13034 thumb_exit (asm_out_file, 2, eh_ofs);
13035 else
13036 thumb_exit (asm_out_file,
13037 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
13040 return "";
13043 /* Functions to save and restore machine-specific function data. */
13044 static struct machine_function *
13045 arm_init_machine_status (void)
13047 struct machine_function *machine;
13048 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
13050 #if ARM_FT_UNKNOWN != 0
13051 machine->func_type = ARM_FT_UNKNOWN;
13052 #endif
13053 return machine;
13056 /* Return an RTX indicating where the return address to the
13057 calling function can be found. */
13059 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
13061 if (count != 0)
13062 return NULL_RTX;
13064 if (TARGET_APCS_32)
13065 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
13066 else
13068 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
13069 GEN_INT (RETURN_ADDR_MASK26));
13070 return get_func_hard_reg_initial_val (cfun, lr);
13074 /* Do anything needed before RTL is emitted for each function. */
13075 void
13076 arm_init_expanders (void)
13078 /* Arrange to initialize and mark the machine per-function status. */
13079 init_machine_status = arm_init_machine_status;
13083 /* Like arm_compute_initial_elimination_offset. Simpler because
13084 THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer. */
13086 HOST_WIDE_INT
13087 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
13089 arm_stack_offsets *offsets;
13091 offsets = arm_get_frame_offsets ();
13093 switch (from)
13095 case ARG_POINTER_REGNUM:
13096 switch (to)
13098 case STACK_POINTER_REGNUM:
13099 return offsets->outgoing_args - offsets->saved_args;
13101 case FRAME_POINTER_REGNUM:
13102 return offsets->soft_frame - offsets->saved_args;
13104 case THUMB_HARD_FRAME_POINTER_REGNUM:
13105 case ARM_HARD_FRAME_POINTER_REGNUM:
13106 return offsets->saved_regs - offsets->saved_args;
13108 default:
13109 abort ();
13111 break;
13113 case FRAME_POINTER_REGNUM:
13114 switch (to)
13116 case STACK_POINTER_REGNUM:
13117 return offsets->outgoing_args - offsets->soft_frame;
13119 case THUMB_HARD_FRAME_POINTER_REGNUM:
13120 case ARM_HARD_FRAME_POINTER_REGNUM:
13121 return offsets->saved_regs - offsets->soft_frame;
13123 default:
13124 abort ();
13126 break;
13128 default:
13129 abort ();
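/* For instance, eliminating from ARG_POINTER_REGNUM to
   STACK_POINTER_REGNUM spans the whole frame (outgoing_args minus
   saved_args), whereas eliminating to the soft frame pointer only
   spans soft_frame minus saved_args.  */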
13134 /* Generate the rest of a function's prologue. */
13135 void
13136 thumb_expand_prologue (void)
13138 rtx insn, dwarf;
13140 HOST_WIDE_INT amount;
13141 arm_stack_offsets *offsets;
13142 unsigned long func_type;
13144 func_type = arm_current_func_type ();
13146 /* Naked functions don't have prologues. */
13147 if (IS_NAKED (func_type))
13148 return;
13150 if (IS_INTERRUPT (func_type))
13152 error ("interrupt Service Routines cannot be coded in Thumb mode");
13153 return;
13156 offsets = arm_get_frame_offsets ();
13158 if (frame_pointer_needed)
13160 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
13161 stack_pointer_rtx));
13162 RTX_FRAME_RELATED_P (insn) = 1;
13165 amount = offsets->outgoing_args - offsets->saved_regs;
13166 if (amount)
13168 if (amount < 512)
13170 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13171 GEN_INT (- amount)));
13172 RTX_FRAME_RELATED_P (insn) = 1;
13174 else
13176 int regno;
13177 rtx reg;
13179 /* The stack decrement is too big for an immediate value in a single
13180 insn. In theory we could issue multiple subtracts, but after
13181 three of them it becomes more space efficient to place the full
13182 value in the constant pool and load into a register. (Also the
13183 ARM debugger really likes to see only one stack decrement per
13184 function). So instead we look for a scratch register into which
13185 we can load the decrement, and then we subtract this from the
13186 stack pointer. Unfortunately on the thumb the only available
13187 scratch registers are the argument registers, and we cannot use
13188 these as they may hold arguments to the function. Instead we
13189 attempt to locate a call preserved register which is used by this
13190 function. If we can find one, then we know that it will have
13191 been pushed at the start of the prologue and so we can corrupt
13192 it now. */
13193 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13194 if (THUMB_REG_PUSHED_P (regno)
13195 && !(frame_pointer_needed
13196 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13197 break;
13199 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13201 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13203 /* Choose an arbitrary, non-argument low register. */
13204 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13206 /* Save it by copying it into a high, scratch register. */
13207 emit_insn (gen_movsi (spare, reg));
13208 /* Add a USE to stop propagate_one_insn() from barfing. */
13209 emit_insn (gen_prologue_use (spare));
13211 /* Decrement the stack. */
13212 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13213 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13214 stack_pointer_rtx, reg));
13215 RTX_FRAME_RELATED_P (insn) = 1;
13216 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13217 plus_constant (stack_pointer_rtx,
13218 GEN_INT (- amount)));
13219 RTX_FRAME_RELATED_P (dwarf) = 1;
13220 REG_NOTES (insn)
13221 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13222 REG_NOTES (insn));
13224 /* Restore the low register's original value. */
13225 emit_insn (gen_movsi (reg, spare));
13227 /* Emit a USE of the restored scratch register, so that flow
13228 analysis will not consider the restore redundant. The
13229 register won't be used again in this function and isn't
13230 restored by the epilogue. */
13231 emit_insn (gen_prologue_use (reg));
13233 else
13235 reg = gen_rtx_REG (SImode, regno);
13237 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13239 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13240 stack_pointer_rtx, reg));
13241 RTX_FRAME_RELATED_P (insn) = 1;
13242 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13243 plus_constant (stack_pointer_rtx,
13244 GEN_INT (- amount)));
13245 RTX_FRAME_RELATED_P (dwarf) = 1;
13246 REG_NOTES (insn)
13247 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13248 REG_NOTES (insn));
13251 /* If the frame pointer is needed, emit a special barrier that
13252 will prevent the scheduler from moving stores to the frame
13253 before the stack adjustment. */
13254 if (frame_pointer_needed)
13255 emit_insn (gen_stack_tie (stack_pointer_rtx,
13256 hard_frame_pointer_rtx));
13259 if (current_function_profile || TARGET_NO_SCHED_PRO)
13260 emit_insn (gen_blockage ());
13263 void
13264 thumb_expand_epilogue (void)
13266 HOST_WIDE_INT amount;
13267 arm_stack_offsets *offsets;
13268 int regno;
13270 /* Naked functions don't have epilogues. */
13271 if (IS_NAKED (arm_current_func_type ()))
13272 return;
13274 offsets = arm_get_frame_offsets ();
13275 amount = offsets->outgoing_args - offsets->saved_regs;
13277 if (frame_pointer_needed)
13278 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13279 else if (amount)
13281 if (amount < 512)
13282 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13283 GEN_INT (amount)));
13284 else
13286 /* r3 is always free in the epilogue. */
13287 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13289 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13290 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13294 /* Emit a USE (stack_pointer_rtx), so that
13295 the stack adjustment will not be deleted. */
13296 emit_insn (gen_prologue_use (stack_pointer_rtx));
13298 if (current_function_profile || TARGET_NO_SCHED_PRO)
13299 emit_insn (gen_blockage ());
13301 /* Emit a clobber for each insn that will be restored in the epilogue,
13302 so that flow2 will get register lifetimes correct. */
13303 for (regno = 0; regno < 13; regno++)
13304 if (regs_ever_live[regno] && !call_used_regs[regno])
13305 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13307 if (! regs_ever_live[LR_REGNUM])
13308 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13311 static void
13312 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13314 int live_regs_mask = 0;
13315 int high_regs_pushed = 0;
13316 int cfa_offset = 0;
13317 int regno;
13319 if (IS_NAKED (arm_current_func_type ()))
13320 return;
13322 if (is_called_in_ARM_mode (current_function_decl))
13324 const char * name;
13326 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13327 abort ();
13328 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13329 abort ();
13330 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13332 /* Generate code sequence to switch us into Thumb mode. */
13333 /* The .code 32 directive has already been emitted by
13334 ASM_DECLARE_FUNCTION_NAME. */
13335 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13336 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13338 /* Generate a label, so that the debugger will notice the
13339 change in instruction sets. This label is also used by
13340 the assembler to bypass the ARM code when this function
13341 is called from a Thumb encoded function elsewhere in the
13342 same file. Hence the definition of STUB_NAME here must
13343 agree with the definition in gas/config/tc-arm.c. */
13345 #define STUB_NAME ".real_start_of"
13347 fprintf (f, "\t.code\t16\n");
13348 #ifdef ARM_PE
13349 if (arm_dllexport_name_p (name))
13350 name = arm_strip_name_encoding (name);
13351 #endif
13352 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13353 fprintf (f, "\t.thumb_func\n");
13354 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13357 if (current_function_pretend_args_size)
13359 if (cfun->machine->uses_anonymous_args)
13361 int num_pushes;
13363 fprintf (f, "\tpush\t{");
13365 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13367 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13368 regno <= LAST_ARG_REGNUM;
13369 regno++)
13370 asm_fprintf (f, "%r%s", regno,
13371 regno == LAST_ARG_REGNUM ? "" : ", ");
13373 fprintf (f, "}\n");
13375 else
13376 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13377 SP_REGNUM, SP_REGNUM,
13378 current_function_pretend_args_size);
13380 /* We don't need to record the stores for unwinding (would it
13381 help the debugger any if we did?), but record the change in
13382 the stack pointer. */
13383 if (dwarf2out_do_frame ())
13385 char *l = dwarf2out_cfi_label ();
13386 cfa_offset = cfa_offset + current_function_pretend_args_size;
13387 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13391 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13392 if (THUMB_REG_PUSHED_P (regno))
13393 live_regs_mask |= 1 << regno;
13395 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p ())
13396 live_regs_mask |= 1 << LR_REGNUM;
13398 if (TARGET_BACKTRACE)
13400 int offset;
13401 int work_register = 0;
13402 int wr;
13404 /* We have been asked to create a stack backtrace structure.
13405 The code looks like this:
13407 0 .align 2
13408 0 func:
13409 0 sub SP, #16 Reserve space for 4 registers.
13410 2 push {R7} Get a work register.
13411 4 add R7, SP, #20 Get the stack pointer before the push.
13412 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13413 8 mov R7, PC Get hold of the start of this code plus 12.
13414 10 str R7, [SP, #16] Store it.
13415 12 mov R7, FP Get hold of the current frame pointer.
13416 14 str R7, [SP, #4] Store it.
13417 16 mov R7, LR Get hold of the current return address.
13418 18 str R7, [SP, #12] Store it.
13419 20 add R7, SP, #16 Point at the start of the backtrace structure.
13420 22 mov FP, R7 Put this value into the frame pointer. */
13422 if ((live_regs_mask & 0xFF) == 0)
13424 /* See if the a4 register is free. */
13426 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
13427 work_register = LAST_ARG_REGNUM;
13428 else /* We must push a register of our own. */
13429 live_regs_mask |= (1 << LAST_LO_REGNUM);
13432 if (work_register == 0)
13434 /* Select a register from the list that will be pushed to
13435 use as our work register. */
13436 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
13437 if ((1 << work_register) & live_regs_mask)
13438 break;
13441 asm_fprintf
13442 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13443 SP_REGNUM, SP_REGNUM);
13445 if (dwarf2out_do_frame ())
13447 char *l = dwarf2out_cfi_label ();
13448 cfa_offset = cfa_offset + 16;
13449 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13452 if (live_regs_mask)
13453 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13455 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
13456 if (wr & live_regs_mask)
13457 offset += 4;
13459 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13460 offset + 16 + current_function_pretend_args_size);
13462 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13463 offset + 4);
13465 /* Make sure that the instruction fetching the PC is in the right place
13466 to calculate "start of backtrace creation code + 12". */
13467 if (live_regs_mask)
13469 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13470 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13471 offset + 12);
13472 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13473 ARM_HARD_FRAME_POINTER_REGNUM);
13474 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13475 offset);
13477 else
13479 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13480 ARM_HARD_FRAME_POINTER_REGNUM);
13481 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13482 offset);
13483 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13484 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13485 offset + 12);
13488 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13489 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13490 offset + 8);
13491 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13492 offset + 12);
13493 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13494 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13496 else if (live_regs_mask)
13497 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13499 for (regno = 8; regno < 13; regno++)
13500 if (THUMB_REG_PUSHED_P (regno))
13501 high_regs_pushed++;
13503 if (high_regs_pushed)
13505 int pushable_regs = 0;
13506 int mask = live_regs_mask & 0xff;
13507 int next_hi_reg;
13509 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13510 if (THUMB_REG_PUSHED_P (next_hi_reg))
13511 break;
13513 pushable_regs = mask;
13515 if (pushable_regs == 0)
13517 /* Desperation time -- this probably will never happen. */
13518 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
13519 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13520 mask = 1 << LAST_ARG_REGNUM;
13523 while (high_regs_pushed > 0)
13525 int real_regs_mask = 0;
13527 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13529 if (mask & (1 << regno))
13531 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13533 high_regs_pushed--;
13534 real_regs_mask |= (1 << next_hi_reg);
13536 if (high_regs_pushed)
13538 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13539 next_hi_reg--)
13540 if (THUMB_REG_PUSHED_P (next_hi_reg))
13541 break;
13543 else
13545 mask &= ~((1 << regno) - 1);
13546 break;
13551 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
13554 if (pushable_regs == 0
13555 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
13556 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13560 /* Handle the case of a double word load into a low register from
13561 a computed memory address. The computed address may involve a
13562 register which is overwritten by the load. */
13563 const char *
13564 thumb_load_double_from_address (rtx *operands)
13566 rtx addr;
13567 rtx base;
13568 rtx offset;
13569 rtx arg1;
13570 rtx arg2;
13572 if (GET_CODE (operands[0]) != REG)
13573 abort ();
13575 if (GET_CODE (operands[1]) != MEM)
13576 abort ();
13578 /* Get the memory address. */
13579 addr = XEXP (operands[1], 0);
13581 /* Work out how the memory address is computed. */
13582 switch (GET_CODE (addr))
13584 case REG:
13585 operands[2] = gen_rtx_MEM (SImode,
13586 plus_constant (XEXP (operands[1], 0), 4));
13588 if (REGNO (operands[0]) == REGNO (addr))
13590 output_asm_insn ("ldr\t%H0, %2", operands);
13591 output_asm_insn ("ldr\t%0, %1", operands);
13593 else
13595 output_asm_insn ("ldr\t%0, %1", operands);
13596 output_asm_insn ("ldr\t%H0, %2", operands);
13598 break;
13600 case CONST:
13601 /* Compute <address> + 4 for the high order load. */
13602 operands[2] = gen_rtx_MEM (SImode,
13603 plus_constant (XEXP (operands[1], 0), 4));
13605 output_asm_insn ("ldr\t%0, %1", operands);
13606 output_asm_insn ("ldr\t%H0, %2", operands);
13607 break;
13609 case PLUS:
13610 arg1 = XEXP (addr, 0);
13611 arg2 = XEXP (addr, 1);
13613 if (CONSTANT_P (arg1))
13614 base = arg2, offset = arg1;
13615 else
13616 base = arg1, offset = arg2;
13618 if (GET_CODE (base) != REG)
13619 abort ();
13621 /* Catch the case of <address> = <reg> + <reg> */
13622 if (GET_CODE (offset) == REG)
13624 int reg_offset = REGNO (offset);
13625 int reg_base = REGNO (base);
13626 int reg_dest = REGNO (operands[0]);
13628 /* Add the base and offset registers together into the
13629 higher destination register. */
13630 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13631 reg_dest + 1, reg_base, reg_offset);
13633 /* Load the lower destination register from the address in
13634 the higher destination register. */
13635 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13636 reg_dest, reg_dest + 1);
13638 /* Load the higher destination register from its own address
13639 plus 4. */
13640 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13641 reg_dest + 1, reg_dest + 1);
13643 else
13645 /* Compute <address> + 4 for the high order load. */
13646 operands[2] = gen_rtx_MEM (SImode,
13647 plus_constant (XEXP (operands[1], 0), 4));
13649 /* If the computed address is held in the low order register
13650 then load the high order register first, otherwise always
13651 load the low order register first. */
13652 if (REGNO (operands[0]) == REGNO (base))
13654 output_asm_insn ("ldr\t%H0, %2", operands);
13655 output_asm_insn ("ldr\t%0, %1", operands);
13657 else
13659 output_asm_insn ("ldr\t%0, %1", operands);
13660 output_asm_insn ("ldr\t%H0, %2", operands);
13663 break;
13665 case LABEL_REF:
13666 /* With no registers to worry about we can just load the value
13667 directly. */
13668 operands[2] = gen_rtx_MEM (SImode,
13669 plus_constant (XEXP (operands[1], 0), 4));
13671 output_asm_insn ("ldr\t%H0, %2", operands);
13672 output_asm_insn ("ldr\t%0, %1", operands);
13673 break;
13675 default:
13676 abort ();
13677 break;
13680 return "";
13683 const char *
13684 thumb_output_move_mem_multiple (int n, rtx *operands)
13686 rtx tmp;
13688 switch (n)
13690 case 2:
13691 if (REGNO (operands[4]) > REGNO (operands[5]))
13693 tmp = operands[4];
13694 operands[4] = operands[5];
13695 operands[5] = tmp;
13697 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13698 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13699 break;
13701 case 3:
13702 if (REGNO (operands[4]) > REGNO (operands[5]))
13704 tmp = operands[4];
13705 operands[4] = operands[5];
13706 operands[5] = tmp;
13708 if (REGNO (operands[5]) > REGNO (operands[6]))
13710 tmp = operands[5];
13711 operands[5] = operands[6];
13712 operands[6] = tmp;
13714 if (REGNO (operands[4]) > REGNO (operands[5]))
13716 tmp = operands[4];
13717 operands[4] = operands[5];
13718 operands[5] = tmp;
13721 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13722 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13723 break;
13725 default:
13726 abort ();
13729 return "";
13732 /* Routines for generating rtl. */
13733 void
13734 thumb_expand_movstrqi (rtx *operands)
13736 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13737 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13738 HOST_WIDE_INT len = INTVAL (operands[2]);
13739 HOST_WIDE_INT offset = 0;
  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
                            reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
                            reg));
    }
}
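
/* Return nonzero if OP is valid as the second operand of a Thumb
   compare: either a register, or a constant in the range 0-255,
   which fits the 8-bit immediate field of the Thumb CMP
   instruction.  */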
int
thumb_cmp_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT
           && INTVAL (op) < 256
           && INTVAL (op) >= 0)
          || s_register_operand (op, mode));
}
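
/* Return nonzero if OP is a constant in the range -255 to -1.  Such
   constants do not fit a CMP immediate directly, but the matching
   pattern can presumably carry out the comparison using the negated
   (positive) value, which does fit an 8-bit immediate.  */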
int
thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
          && INTVAL (op) < 0
          && INTVAL (op) > -256);
}
/* Return TRUE if a result can be stored in OP without clobbering the
   condition code register.  Prior to reload we only accept a
   register.  After reload we have to be able to handle memory as
   well, since a pseudo may not get a hard reg and reload cannot
   handle output-reloads on jump insns.

   We could possibly handle mem before reload as well, but that might
   complicate things with the need to handle increment
   side-effects.  */

int
thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
{
  return (s_register_operand (op, mode)
          || ((reload_in_progress || reload_completed)
              && memory_operand (op, mode)));
}

/* Handle storing a half-word to memory during reload.  */
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}
/* Return the length of a function name prefix
   that starts with the character C, or zero if C
   does not begin a recognized prefix.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
    default: return 0;
    }
}
/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;
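
  /* For example, assuming a name carries two of the single-character
     prefixes recognized by ARM_NAME_ENCODING_LENGTHS (such as the
     '*' verbatim marker), the loop below strips it down to the bare
     name one prefix hop at a time.  */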
  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;
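
/* Return an address for the symbol X as an offset from aof_pic_label,
   allocating a new 4-byte slot in the PIC chain if X has not been
   seen before.  */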
rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
               PIC_OFFSET_TABLE_REGNUM,
               PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}
int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
           arm_text_section_count++);
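
  /* The first call thus yields "\tAREA |C$$code1|, CODE, READONLY";
     the PIC attributes are appended below when compiling with
     -fpic.  */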
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}
void
aof_delete_import (const char *name)
{
  struct import ** old;
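
  /* Walk the list through a pointer-to-pointer so that the matching
     node can be unlinked without having to track a separate
     "previous" pointer.  */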
  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
        {
          *old = (*old)->next;
          return;
        }
    }
}

int arm_main_function = 0;
static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
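  /* Emit the standard AOF register-name aliases: RN directives for
     the core registers (argument registers a1-a4, variable registers
     v1-v6, and the sl/fp/ip/sp/lr/pc special names) and FN directives
     for the FPA floating-point registers f0-f7.  */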
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */

#ifdef OBJECT_FORMAT_ELF
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.

   Differs from the default elf version only in the prefix character
   used before the section type.  */
static void
arm_elf_asm_named_section (const char *name, unsigned int flags)
{
  char flagchars[10], *f = flagchars;

  if (! named_section_first_declaration (name))
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  if (!(flags & SECTION_DEBUG))
    *f++ = 'a';
  if (flags & SECTION_WRITE)
    *f++ = 'w';
  if (flags & SECTION_CODE)
    *f++ = 'x';
  if (flags & SECTION_SMALL)
    *f++ = 's';
  if (flags & SECTION_MERGE)
    *f++ = 'M';
  if (flags & SECTION_STRINGS)
    *f++ = 'S';
  if (flags & SECTION_TLS)
    *f++ = 'T';
  *f = '\0';
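
  /* For a typical code section this emits, e.g.,
        .section        .text.foo,"ax",%progbits
     with '%' rather than '@' before the type, as noted above.  */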
  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);

  if (!(flags & SECTION_NOTYPE))
    {
      const char *type;

      if (flags & SECTION_BSS)
        type = "nobits";
      else
        type = "progbits";

      fprintf (asm_out_file, ",%%%s", type);

      if (flags & SECTION_ENTSIZE)
        fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
    }

  putc ('\n', asm_out_file);
}
#endif
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
        arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
        arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                     HOST_WIDE_INT delta,
                     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
                    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;

  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }

  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
        shift += 2;
      else
        {
          asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
                       mi_op, this_regno, this_regno,
                       mi_delta & (0xff << shift));
          mi_delta &= ~(0xff << shift);
          shift += 8;
        }
    }
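
  /* The loop above splits DELTA into chunks that each fit an ARM
     8-bit-rotated immediate (an 8-bit value at an even bit position).
     A delta of 0x401, for example, becomes "add r0, r0, #1" followed
     by "add r0, r0, #1024" (assuming the this pointer is in r0).  */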

  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
        fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }
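
  /* Elements are emitted most-significant first: a V4HImode vector
     whose elements 0..3 hold 1, 2, 3, 4 prints as 0x0004000300020001
     (illustrative).  */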
  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
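  /* The expansion saves the base register on the stack, reuses it to
     load the out-of-range word with a plain ldr, moves the result
     into the wCGR register with tmcr, and finally restores the base
     register from the stack.  */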
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which the address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
                            enum machine_mode mode ATTRIBUTE_UNUSED,
                            tree type ATTRIBUTE_UNUSED,
                            int *pretend_size,
                            int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
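
  /* For example, if one argument register has been consumed by named
     arguments (cum->nregs == 1) and the usual four argument registers
     are available, the prologue must push the remaining three, i.e.
     3 * UNITS_PER_WORD bytes, so that the anonymous arguments end up
     contiguous on the stack.  */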
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}

/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);
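
  /* VALUE is now the SET_DEST written by the producer and ADDR the
     MEM stored to by the consumer; the store has no early address
     dependency as long as the produced register does not appear in
     that address.  */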
  return !reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
          && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}