/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];

/* Forward function declarations.  */
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
                             rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int, rtx);
static void thumb_pushpop (FILE *, int, int, int *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int arm_use_dfa_pipeline_interface (void);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);

/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* What program mode is the cpu running in? 26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Used to parse -mstructure_size_boundary command line option.  */
const char * structure_size_string = NULL;
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, FLAGS, COSTS) \
  {#NAME, arm_none, FLAGS, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  { "armv2",   arm2,       FL_CO_PROC | FL_MODE26 , NULL},
  { "armv2a",  arm2,       FL_CO_PROC | FL_MODE26 , NULL},
  { "armv3",   arm6,       FL_CO_PROC | FL_MODE26 | FL_MODE32 , NULL},
  { "armv3m",  arm7m,      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M , NULL},
  { "armv4",   arm7tdmi,   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M | FL_ARCH4 , NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",  arm7tdmi,   FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB , NULL},
  { "armv5",   arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
  { "armv5t",  arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 , NULL},
  { "armv5te", arm1026ejs, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E , NULL},
  { "armv6",   arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
  { "armv6j",  arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6 , NULL},
  { "ep9312",  ep9312,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS , NULL},
  { "iwmmxt",  iwmmxt,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT , NULL},
  { NULL, arm_none, 0 , NULL}
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum fputype fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

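  /* Kernighan's trick: clearing the least-significant set bit on each
     pass means the loop below runs exactly once per set bit, e.g.
     bit_count (0x29) == 3.  */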
  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == 0
                    /* If -march= is used, and -mcpu= has not been used,
                       assume that we should tune for a representative
                       CPU from that architecture.  */
                    || i == 1
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == 2)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                if (i != 2)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning ("switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int sought;
      static const struct cpu_default
        {
          const int cpu;
          const char *const name;
        }
      cpu_defaults[] =
        {
          { TARGET_CPU_arm2,      "arm2" },
          { TARGET_CPU_arm6,      "arm6" },
          { TARGET_CPU_arm610,    "arm610" },
          { TARGET_CPU_arm710,    "arm710" },
          { TARGET_CPU_arm7m,     "arm7m" },
          { TARGET_CPU_arm7500fe, "arm7500fe" },
          { TARGET_CPU_arm7tdmi,  "arm7tdmi" },
          { TARGET_CPU_arm8,      "arm8" },
          { TARGET_CPU_arm810,    "arm810" },
          { TARGET_CPU_arm9,      "arm9" },
          { TARGET_CPU_strongarm, "strongarm" },
          { TARGET_CPU_xscale,    "xscale" },
          { TARGET_CPU_ep9312,    "ep9312" },
          { TARGET_CPU_iwmmxt,    "iwmmxt" },
          { TARGET_CPU_arm926ejs, "arm926ejs" },
          { TARGET_CPU_arm1026ejs, "arm1026ejs" },
          { TARGET_CPU_arm1136js, "arm1136js" },
          { TARGET_CPU_arm1136jfs, "arm1136jfs" },
          { TARGET_CPU_generic,   "arm" },
          { 0, 0 }
        };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
        abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
        if (streq (def->name, sel->name))
          break;

      if (sel->name == NULL)
        abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* Force apcs-32 to be used for interworking.  */
          target_flags |= ARM_FLAG_APCS_32;

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }
      else if (!TARGET_APCS_32)
        sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 Unfortunately this does not work with multilibing.  We
                 need to be able to support multilibs for -mapcs-26 and for
                 -mthumb-interwork and there is no CPU that can support both
                 options.  Instead if we cannot find a cpu that has both the
                 characteristics of the default cpu and the given command line
                 options we scan the array again looking for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              if (best_fit == NULL)
                abort ();
              else
                sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
         user, so issue a warning message.  If the user has specified
         "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
        warning ("target CPU does not support APCS-32" );
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking" );
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
        warning ("interworking forces APCS-32 to be used" );
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
                    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  if (TARGET_IWMMXT && (! TARGET_ATPCS))
    target_flags |= ARM_FLAG_ATPCS;

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      if ((insn_flags & FL_VFPV2) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else if (insn_flags & FL_CIRRUS)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    {
      /* Use soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
        arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
        arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
    sorry ("-mfloat-abi=softfp");
  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
        arm_structure_size_boundary = size;
      else
        warning ("structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_is_strong)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

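/* For example, a handler declared as
     void handler (void) __attribute__ ((interrupt ("IRQ")));
   is given the type ARM_FT_ISR by this table.  */
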
typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char * arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
        a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
        type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
        type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value (always true
         until we implement return-in-regs), or for a tail-call
         argument ...  */
      if (sibling)
        {
          if (GET_CODE (sibling) != CALL_INSN)
            abort ();

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
        return 0;

  return 1;
}

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2 */
  if ((i & (i - 1)) == 0)
    return TRUE;
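
  /* Otherwise test against the ARM immediate encoding: an 8-bit value
     rotated right by an even number of bit positions.  For example,
     0x00ab0000 (0xab rotated right by 16) is representable, while
     0x00000fff is not.  */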
  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
        return TRUE;
      mask =
        (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
                       >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && (arm_gen_constant (code, mode, val, target, source, 1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}

static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
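  /* Each iteration below accounts for one instruction that would
     materialize an 8-bit window of REMAINDER; I steps down by 8 bits
     (6 + 2) when a window is consumed, or by 2 bits otherwise.  */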
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_NOT (mode, source)));
          return 1;
        }

      /* We don't know how to handle this yet below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_insn (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_MINUS (mode, GEN_INT (val),
                                                   source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      abort ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_insn (gen_rtx_SET (VOIDmode, target,
                                (source ? gen_rtx_fmt_ee (code, mode, source,
                                                          GEN_INT (val))
                                 : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
         to be negative.  This is a good way of doing it, since the shift
         may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
        {
          if (const_ok_for_arm
              (temp1 = ARM_SIGN_EXTEND (remainder
                                        << (set_sign_bit_copies - 1))))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
          /* For an inverted constant, we will need to set the low bits,
             these will be shifted out of harm's way.  */
          temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
          if (const_ok_for_arm (~temp1))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
        }

      /* See if we can generate this by setting the bottom (or the top)
         16 bits, and then shifting these into the other half of the
         word.  We only look for the simplest cases, to do more would cost
         too much.  Be careful, however, not to generate this when the
         alternative would take fewer insns.  */
      if (val & 0xffff0000)
        {
          temp1 = remainder & 0xffff0000;
          temp2 = remainder & 0x0000ffff;

          /* Overlaps outside this range are best done using other methods.  */
          for (i = 9; i < 24; i++)
            {
              if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
                  && !const_ok_for_arm (temp2))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp2, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn (gen_rtx_SET
                               (VOIDmode, target,
                                gen_rtx_IOR (mode,
                                             gen_rtx_ASHIFT (mode, source,
                                                             GEN_INT (i)),
                                             source)));
                  return insns + 1;
                }
            }

          /* Don't duplicate cases already considered.  */
          for (i = 17; i < 24; i++)
            {
              if (((temp1 | (temp1 >> i)) == remainder)
                  && !const_ok_for_arm (temp1))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp1, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn
                      (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_IOR
                                    (mode,
                                     gen_rtx_LSHIFTRT (mode, source,
                                                       GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }
        }
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
         single instruction, and we can find a temporary to put it in,
         then this can be done in two instructions instead of 3-4.  */
      if (subtargets
          /* TARGET can't be NULL if SUBTARGETS is 0 */
          || (reload_completed && !reg_mentioned_p (target, source)))
        {
          if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
            {
              if (generate)
                {
                  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

                  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
                  emit_insn (gen_rtx_SET (VOIDmode, target,
                                          gen_rtx_fmt_ee (code, mode, source, sub)));
                }
              return 2;
            }
        }

      if (code == XOR)
        break;

      if (set_sign_bit_copies > 8
          && (val & (-1 << (32 - set_sign_bit_copies))) == val)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_sign_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode,
                                                                   source,
                                                                   shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode, sub,
                                                                     shift))));
            }
          return 2;
        }

      if (set_zero_bit_copies > 8
          && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_zero_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode,
                                                                     source,
                                                                     shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode, sub,
                                                                   shift))));
            }
          return 2;
        }

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode, source)));
              source = sub;
              if (subtargets)
                sub = gen_reg_rtx (mode);
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_AND (mode, source,
                                                   GEN_INT (temp1))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode, sub)));
            }
          return 3;
        }
      break;

    case AND:
      /* See if two shifts will do 2 or more insn's worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = ((0xffffffff
                                       << (32 - clear_sign_bit_copies))
                                      & 0xffffffff);

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_sign_bit_copies);

              emit_insn (gen_ashlsi3 (new_src, source, shift));
              emit_insn (gen_lshrsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;

                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_zero_bit_copies);

              emit_insn (gen_lshrsi3 (new_src, source, shift));
              emit_insn (gen_ashlsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

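  /* If more than half of the 32 bits are set, it is cheaper to operate
     on the complement (for AND, or when inversion is allowed) or on the
     negation (for PLUS), which then has fewer than 16 set bits.  */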
  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary, we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
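  /* The 2-bit alignment matters because an ARM immediate is an 8-bit
     value rotated right by an even number of bits, so only evenly
     aligned 8-bit chunks can be encoded in a single instruction.  */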
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
        int consecutive_zeros = 0;

        if (!(remainder & (3 << i)))
          {
            while ((i < 32) && !(remainder & (3 << i)))
              {
                consecutive_zeros += 2;
                i += 2;
              }
            if (consecutive_zeros > best_consecutive_zeros)
              {
                best_consecutive_zeros = consecutive_zeros;
                best_start = i - consecutive_zeros;
              }
            i -= 2;
          }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

               *((volatile int *)0xe0000100) = 1;
               *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

                mov rA, #0xe0000000
                mov rB, #1
                str rB, [rA, #0x100]
                mov rB, #2
                str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
        && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
        && (count_insns_for_constant (remainder, 0) <=
            count_insns_for_constant (remainder, best_start)))
      best_start = 0;

1873 /* Now start emitting the insns. */
1874 i = best_start;
1877 int end;
1879 if (i <= 0)
1880 i += 32;
1881 if (remainder & (3 << (i - 2)))
1883 end = i - 8;
1884 if (end < 0)
1885 end += 32;
1886 temp1 = remainder & ((0x0ff << end)
1887 | ((i < end) ? (0xff >> (32 - end)) : 0));
1888 remainder &= ~temp1;
1890 if (generate)
1892 rtx new_src, temp1_rtx;
1894 if (code == SET || code == MINUS)
1896 new_src = (subtargets ? gen_reg_rtx (mode) : target);
1897 if (can_invert && code != MINUS)
1898 temp1 = ~temp1;
1900 else
1902 if (remainder && subtargets)
1903 new_src = gen_reg_rtx (mode);
1904 else
1905 new_src = target;
1906 if (can_invert)
1907 temp1 = ~temp1;
1908 else if (can_negate)
1909 temp1 = -temp1;
1912 temp1 = trunc_int_for_mode (temp1, mode);
1913 temp1_rtx = GEN_INT (temp1);
1915 if (code == SET)
1917 else if (code == MINUS)
1918 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
1919 else
1920 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
1922 emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
1923 source = new_src;
1926 if (code == SET)
1928 can_invert = 0;
1929 code = PLUS;
1931 else if (code == MINUS)
1932 code = PLUS;
1934 insns++;
1935 i -= 6;
1937 i -= 2;
1939 while (remainder);
1942 return insns;
1945 /* Canonicalize a comparison so that we are more likely to recognize it.
1946 This can be done for a few constant compares, where we can make the
1947 immediate value easier to load. */
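/* For example (illustrative): 0xfff is not a valid ARM immediate, but
   0x1000 is; since for integers x > 0xfff is equivalent to x >= 0x1000,
   a GT comparison against 0xfff is rewritten below as GE against
   0x1000, allowing a single CMP with an immediate operand.  */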
1949 enum rtx_code
1950 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1952 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1954 switch (code)
1956 case EQ:
1957 case NE:
1958 return code;
1960 case GT:
1961 case LE:
1962 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1963 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1965 *op1 = GEN_INT (i + 1);
1966 return code == GT ? GE : LT;
1968 break;
1970 case GE:
1971 case LT:
1972 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1973 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1975 *op1 = GEN_INT (i - 1);
1976 return code == GE ? GT : LE;
1978 break;
1980 case GTU:
1981 case LEU:
1982 if (i != ~((unsigned HOST_WIDE_INT) 0)
1983 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1985 *op1 = GEN_INT (i + 1);
1986 return code == GTU ? GEU : LTU;
1988 break;
1990 case GEU:
1991 case LTU:
1992 if (i != 0
1993 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1995 *op1 = GEN_INT (i - 1);
1996 return code == GEU ? GTU : LEU;
1998 break;
2000 default:
2001 abort ();
2004 return code;
2007 /* Decide whether a type should be returned in memory (true)
2008 or in a register (false). This is called by the macro
2009 RETURN_IN_MEMORY. */
2011 arm_return_in_memory (tree type)
2013 HOST_WIDE_INT size;
2015 if (!AGGREGATE_TYPE_P (type))
2016 /* All simple types are returned in registers. */
2017 return 0;
2019 size = int_size_in_bytes (type);
2021 if (TARGET_ATPCS)
2023 /* ATPCS returns aggregate types in memory only if they are
2024 larger than a word (or are variable size). */
2025 return (size < 0 || size > UNITS_PER_WORD);
2028 /* For the arm-wince targets we choose to be compatible with Microsoft's
2029 ARM and Thumb compilers, which always return aggregates in memory. */
2030 #ifndef ARM_WINCE
2031 /* All structures/unions bigger than one word are returned in memory.
2032 Also catch the case where int_size_in_bytes returns -1. In this case
2033 the aggregate is either huge or of variable size, and in either case
2034 we will want to return it via memory and not in a register. */
2035 if (size < 0 || size > UNITS_PER_WORD)
2036 return 1;
2038 if (TREE_CODE (type) == RECORD_TYPE)
2040 tree field;
2042 /* For a struct the APCS says that we only return in a register
2043 if the type is 'integer like' and every addressable element
2044 has an offset of zero. For practical purposes this means
2045 that the structure can have at most one non bit-field element
2046 and that this element must be the first one in the structure. */
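/* Illustrative examples (not from the original source) of these rules:

   struct { int x; }             is returned in a register;
   struct { short a; short b; }  goes in memory (a second addressable
                                 field);
   struct { float f; }           goes in memory (float first field).  */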
2048 /* Find the first field, ignoring non FIELD_DECL things which will
2049 have been created by C++. */
2050 for (field = TYPE_FIELDS (type);
2051 field && TREE_CODE (field) != FIELD_DECL;
2052 field = TREE_CHAIN (field))
2053 continue;
2055 if (field == NULL)
2056 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2058 /* Check that the first field is valid for returning in a register. */
2060 /* ... Floats are not allowed.  */
2061 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2062 return 1;
2064 /* ... Aggregates that are not themselves valid for returning in
2065 a register are not allowed. */
2066 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2067 return 1;
2069 /* Now check the remaining fields, if any. Only bitfields are allowed,
2070 since they are not addressable. */
2071 for (field = TREE_CHAIN (field);
2072 field;
2073 field = TREE_CHAIN (field))
2075 if (TREE_CODE (field) != FIELD_DECL)
2076 continue;
2078 if (!DECL_BIT_FIELD_TYPE (field))
2079 return 1;
2082 return 0;
2085 if (TREE_CODE (type) == UNION_TYPE)
2087 tree field;
2089 /* Unions can be returned in registers if every element is
2090 integral, or can be returned in an integer register. */
2091 for (field = TYPE_FIELDS (type);
2092 field;
2093 field = TREE_CHAIN (field))
2095 if (TREE_CODE (field) != FIELD_DECL)
2096 continue;
2098 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2099 return 1;
2101 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2102 return 1;
2105 return 0;
2107 #endif /* not ARM_WINCE */
2109 /* Return all other types in memory. */
2110 return 1;
2113 /* Indicate whether or not words of a double are in big-endian order. */
2116 arm_float_words_big_endian (void)
2118 if (TARGET_MAVERICK)
2119 return 0;
2121 /* For FPA, float words are always big-endian.  For VFP, float words
2122 follow the memory system mode. */
2124 if (TARGET_FPA)
2126 return 1;
2129 if (TARGET_VFP)
2130 return (TARGET_BIG_END ? 1 : 0);
2132 return 1;
2135 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2136 for a call to a function whose data type is FNTYPE.
2137 For a library call, FNTYPE is NULL. */
2138 void
2139 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2140 rtx libname ATTRIBUTE_UNUSED,
2141 tree fndecl ATTRIBUTE_UNUSED)
2143 /* On the ARM, the offset starts at 0. */
2144 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2145 pcum->iwmmxt_nregs = 0;
2147 pcum->call_cookie = CALL_NORMAL;
2149 if (TARGET_LONG_CALLS)
2150 pcum->call_cookie = CALL_LONG;
2152 /* Check for long call/short call attributes. The attributes
2153 override any command line option. */
2154 if (fntype)
2156 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2157 pcum->call_cookie = CALL_SHORT;
2158 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2159 pcum->call_cookie = CALL_LONG;
2162 /* Varargs vectors are treated the same as long long.
2163 named_count avoids having to change the way arm handles 'named'.  */
2164 pcum->named_count = 0;
2165 pcum->nargs = 0;
2167 if (TARGET_REALLY_IWMMXT && fntype)
2169 tree fn_arg;
2171 for (fn_arg = TYPE_ARG_TYPES (fntype);
2172 fn_arg;
2173 fn_arg = TREE_CHAIN (fn_arg))
2174 pcum->named_count += 1;
2176 if (! pcum->named_count)
2177 pcum->named_count = INT_MAX;
2181 /* Determine where to put an argument to a function.
2182 Value is zero to push the argument on the stack,
2183 or a hard register in which to store the argument.
2185 MODE is the argument's machine mode.
2186 TYPE is the data type of the argument (as a tree).
2187 This is null for libcalls where that information may
2188 not be available.
2189 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2190 the preceding args and about the function being called.
2191 NAMED is nonzero if this argument is a named parameter
2192 (otherwise it is an extra parameter matching an ellipsis). */
2195 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2196 tree type ATTRIBUTE_UNUSED, int named)
2198 if (TARGET_REALLY_IWMMXT)
2200 if (VECTOR_MODE_SUPPORTED_P (mode))
2202 /* Varargs vectors are treated the same as long long.
2203 named_count avoids having to change the way arm handles 'named'.  */
2204 if (pcum->named_count <= pcum->nargs + 1)
2206 if (pcum->nregs == 1)
2207 pcum->nregs += 1;
2208 if (pcum->nregs <= 2)
2209 return gen_rtx_REG (mode, pcum->nregs);
2210 else
2211 return NULL_RTX;
2213 else if (pcum->iwmmxt_nregs <= 9)
2214 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2215 else
2216 return NULL_RTX;
2218 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2219 pcum->nregs += 1;
2222 if (mode == VOIDmode)
2223 /* Compute operand 2 of the call insn. */
2224 return GEN_INT (pcum->call_cookie);
2226 if (!named || pcum->nregs >= NUM_ARG_REGS)
2227 return NULL_RTX;
2229 return gen_rtx_REG (mode, pcum->nregs);
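/* Illustrative example (not from the original source): for

     void f (int a, long long b, int c);

   a is passed in r0; b is DImode, so it is bumped to the even register
   pair r2/r3; c then finds all of r0-r3 in use and goes on the stack.  */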
2232 /* Variable sized types are passed by reference. This is a GCC
2233 extension to the ARM ABI. */
2236 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2237 enum machine_mode mode ATTRIBUTE_UNUSED,
2238 tree type, int named ATTRIBUTE_UNUSED)
2240 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2243 /* Implement va_arg. */
2246 arm_va_arg (tree valist, tree type)
2248 /* Variable sized types are passed by reference. */
2249 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2251 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2252 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2255 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2257 tree minus_eight;
2258 tree t;
2260 /* Maintain 64-bit alignment of the valist pointer by
2261 constructing: valist = ((valist + (8 - 1)) & -8). */
2262 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2263 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2264 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2265 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2266 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2267 TREE_SIDE_EFFECTS (t) = 1;
2268 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2270 /* This is to stop the combine pass optimizing
2271 away the alignment adjustment. */
2272 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2275 return std_expand_builtin_va_arg (valist, type);
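/* Worked example (illustrative): if valist is currently 0x1004, the
   statement built above computes (0x1004 + 7) & -8 == 0x1008, restoring
   the 64-bit alignment an iWMMXt vector argument requires.  */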
2278 /* Encode the current state of the #pragma [no_]long_calls. */
2279 typedef enum
2281 OFF, /* No #pragma [no_]long_calls is in effect. */
2282 LONG, /* #pragma long_calls is in effect. */
2283 SHORT /* #pragma no_long_calls is in effect. */
2284 } arm_pragma_enum;
2286 static arm_pragma_enum arm_pragma_long_calls = OFF;
2288 void
2289 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2291 arm_pragma_long_calls = LONG;
2294 void
2295 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2297 arm_pragma_long_calls = SHORT;
2300 void
2301 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2303 arm_pragma_long_calls = OFF;
2306 /* Table of machine attributes. */
2307 const struct attribute_spec arm_attribute_table[] =
2309 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2310 /* Function calls made to this symbol must be done indirectly, because
2311 it may lie outside of the 26 bit addressing range of a normal function
2312 call. */
2313 { "long_call", 0, 0, false, true, true, NULL },
2314 /* Whereas these functions are always known to reside within the 26 bit
2315 addressing range. */
2316 { "short_call", 0, 0, false, true, true, NULL },
2317 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2318 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2319 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2320 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2321 #ifdef ARM_PE
2322 /* ARM/PE has three new attributes:
2323 interfacearm - ?
2324 dllexport - for exporting a function/variable that will live in a dll
2325 dllimport - for importing a function/variable from a dll
2327 Microsoft allows multiple declspecs in one __declspec, separating
2328 them with spaces. We do NOT support this. Instead, use __declspec
2329 multiple times.  */
2331 { "dllimport", 0, 0, true, false, false, NULL },
2332 { "dllexport", 0, 0, true, false, false, NULL },
2333 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2334 #endif
2335 { NULL, 0, 0, false, false, false, NULL }
2338 /* Handle an attribute requiring a FUNCTION_DECL;
2339 arguments as in struct attribute_spec.handler. */
2340 static tree
2341 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2342 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2344 if (TREE_CODE (*node) != FUNCTION_DECL)
2346 warning ("`%s' attribute only applies to functions",
2347 IDENTIFIER_POINTER (name));
2348 *no_add_attrs = true;
2351 return NULL_TREE;
2354 /* Handle an "interrupt" or "isr" attribute;
2355 arguments as in struct attribute_spec.handler. */
2356 static tree
2357 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2358 bool *no_add_attrs)
2360 if (DECL_P (*node))
2362 if (TREE_CODE (*node) != FUNCTION_DECL)
2364 warning ("`%s' attribute only applies to functions",
2365 IDENTIFIER_POINTER (name));
2366 *no_add_attrs = true;
2368 /* FIXME: the argument, if any, is checked for type attributes;
2369 should it be checked for decl ones? */
2371 else
2373 if (TREE_CODE (*node) == FUNCTION_TYPE
2374 || TREE_CODE (*node) == METHOD_TYPE)
2376 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2378 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2379 *no_add_attrs = true;
2382 else if (TREE_CODE (*node) == POINTER_TYPE
2383 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2384 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2385 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2387 *node = build_type_copy (*node);
2388 TREE_TYPE (*node) = build_type_attribute_variant
2389 (TREE_TYPE (*node),
2390 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2391 *no_add_attrs = true;
2393 else
2395 /* Possibly pass this attribute on from the type to a decl. */
2396 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2397 | (int) ATTR_FLAG_FUNCTION_NEXT
2398 | (int) ATTR_FLAG_ARRAY_NEXT))
2400 *no_add_attrs = true;
2401 return tree_cons (name, args, NULL_TREE);
2403 else
2405 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2410 return NULL_TREE;
2413 /* Return 0 if the attributes for two types are incompatible, 1 if they
2414 are compatible, and 2 if they are nearly compatible (which causes a
2415 warning to be generated). */
2416 static int
2417 arm_comp_type_attributes (tree type1, tree type2)
2419 int l1, l2, s1, s2;
2421 /* Check for mismatch of non-default calling convention. */
2422 if (TREE_CODE (type1) != FUNCTION_TYPE)
2423 return 1;
2425 /* Check for mismatched call attributes. */
2426 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2427 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2428 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2429 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2431 /* Only bother to check if an attribute is defined. */
2432 if (l1 | l2 | s1 | s2)
2434 /* If one type has an attribute, the other must have the same attribute. */
2435 if ((l1 != l2) || (s1 != s2))
2436 return 0;
2438 /* Disallow mixed attributes. */
2439 if ((l1 & s2) || (l2 & s1))
2440 return 0;
2443 /* Check for mismatched ISR attribute. */
2444 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2445 if (! l1)
2446 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2447 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2448 if (! l2)
2449 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2450 if (l1 != l2)
2451 return 0;
2453 return 1;
2456 /* Encode long_call or short_call attribute by prefixing
2457 symbol name in DECL with a special character FLAG. */
2458 void
2459 arm_encode_call_attribute (tree decl, int flag)
2461 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2462 int len = strlen (str);
2463 char * newstr;
2465 /* Do not allow weak functions to be treated as short call. */
2466 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2467 return;
2469 newstr = alloca (len + 2);
2470 newstr[0] = flag;
2471 strcpy (newstr + 1, str);
2473 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2474 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
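/* For example (illustrative): assuming the flag characters defined in
   arm.h, a function "foo" carrying the short_call attribute has its
   assembler name rewritten to "^foo"; ENCODED_SHORT_CALL_ATTR_P later
   recognizes the prefix, which is stripped again before the name is
   actually written out.  */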
2477 /* Assigns default attributes to newly defined type. This is used to
2478 set short_call/long_call attributes for function types of
2479 functions defined inside corresponding #pragma scopes. */
2480 static void
2481 arm_set_default_type_attributes (tree type)
2483 /* Add __attribute__ ((long_call)) to all functions, when
2484 inside #pragma long_calls or __attribute__ ((short_call)),
2485 when inside #pragma no_long_calls. */
2486 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2488 tree type_attr_list, attr_name;
2489 type_attr_list = TYPE_ATTRIBUTES (type);
2491 if (arm_pragma_long_calls == LONG)
2492 attr_name = get_identifier ("long_call");
2493 else if (arm_pragma_long_calls == SHORT)
2494 attr_name = get_identifier ("short_call");
2495 else
2496 return;
2498 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2499 TYPE_ATTRIBUTES (type) = type_attr_list;
2503 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2504 defined within the current compilation unit. If this cannot be
2505 determined, then 0 is returned. */
2506 static int
2507 current_file_function_operand (rtx sym_ref)
2509 /* This is a bit of a fib. A function will have a short call flag
2510 applied to its name if it has the short call attribute, or it has
2511 already been defined within the current compilation unit. */
2512 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2513 return 1;
2515 /* The current function is always defined within the current compilation
2516 unit.  If it is a weak definition, however, then this may not be the real
2517 definition of the function, and so we have to say no. */
2518 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2519 && !DECL_WEAK (current_function_decl))
2520 return 1;
2522 /* We cannot make the determination - default to returning 0. */
2523 return 0;
2526 /* Return nonzero if a 32 bit "long_call" should be generated for
2527 this call. We generate a long_call if the function:
2529 a. has an __attribute__ ((long_call))
2530 or b. is within the scope of a #pragma long_calls
2531 or c. the -mlong-calls command line switch has been specified
2533 However, we do not generate a long call if the function:
2535 d. has an __attribute__ ((short_call))
2536 or e. is inside the scope of a #pragma no_long_calls
2537 or f. has an __attribute__ ((section))
2538 or g. is defined within the current compilation unit.
2540 This function will be called by C fragments contained in the machine
2541 description file. CALL_REF and CALL_COOKIE correspond to the matched
2542 rtl operands. CALL_SYMBOL is used to distinguish between
2543 two different callers of the function. It is set to 1 in the
2544 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2545 and "call_value" patterns. This is because of the difference in the
2546 SYM_REFs passed by these patterns. */
2548 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2550 if (!call_symbol)
2552 if (GET_CODE (sym_ref) != MEM)
2553 return 0;
2555 sym_ref = XEXP (sym_ref, 0);
2558 if (GET_CODE (sym_ref) != SYMBOL_REF)
2559 return 0;
2561 if (call_cookie & CALL_SHORT)
2562 return 0;
2564 if (TARGET_LONG_CALLS && flag_function_sections)
2565 return 1;
2567 if (current_file_function_operand (sym_ref))
2568 return 0;
2570 return (call_cookie & CALL_LONG)
2571 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2572 || TARGET_LONG_CALLS;
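/* Illustrative example (not from the original source): given

     extern void far_away (void) __attribute__ ((long_call));

   a call to far_away cannot use a plain BL, whose 24-bit immediate
   spans only +/-32Mb, so the address is loaded into a register and
   the call made through it, e.g.

	ldr	ip, .Lfar	@ full 32-bit address from the pool
	mov	lr, pc
	bx	ip  */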
2575 /* Return nonzero if it is ok to make a tail-call to DECL. */
2576 static bool
2577 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2579 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2581 if (cfun->machine->sibcall_blocked)
2582 return false;
2584 /* Never tailcall something for which we have no decl, or if we
2585 are in Thumb mode. */
2586 if (decl == NULL || TARGET_THUMB)
2587 return false;
2589 /* Get the calling method. */
2590 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2591 call_type = CALL_SHORT;
2592 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2593 call_type = CALL_LONG;
2595 /* Cannot tail-call to long calls, since these are out of range of
2596 a branch instruction. However, if not compiling PIC, we know
2597 we can reach the symbol if it is in this compilation unit. */
2598 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2599 return false;
2601 /* If we are interworking and the function is not declared static
2602 then we can't tail-call it unless we know that it exists in this
2603 compilation unit (since it might be a Thumb routine). */
2604 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2605 return false;
2607 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2608 if (IS_INTERRUPT (arm_current_func_type ()))
2609 return false;
2611 /* Everything else is ok. */
2612 return true;
2616 /* Addressing mode support functions. */
2618 /* Return nonzero if X is a legitimate immediate operand when compiling
2619 for PIC. */
2621 legitimate_pic_operand_p (rtx x)
2623 if (CONSTANT_P (x)
2624 && flag_pic
2625 && (GET_CODE (x) == SYMBOL_REF
2626 || (GET_CODE (x) == CONST
2627 && GET_CODE (XEXP (x, 0)) == PLUS
2628 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2629 return 0;
2631 return 1;
2635 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2637 if (GET_CODE (orig) == SYMBOL_REF
2638 || GET_CODE (orig) == LABEL_REF)
2640 #ifndef AOF_ASSEMBLER
2641 rtx pic_ref, address;
2642 #endif
2643 rtx insn;
2644 int subregs = 0;
2646 if (reg == 0)
2648 if (no_new_pseudos)
2649 abort ();
2650 else
2651 reg = gen_reg_rtx (Pmode);
2653 subregs = 1;
2656 #ifdef AOF_ASSEMBLER
2657 /* The AOF assembler can generate relocations for these directly, and
2658 understands that the PIC register has to be added into the offset. */
2659 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2660 #else
2661 if (subregs)
2662 address = gen_reg_rtx (Pmode);
2663 else
2664 address = reg;
2666 if (TARGET_ARM)
2667 emit_insn (gen_pic_load_addr_arm (address, orig));
2668 else
2669 emit_insn (gen_pic_load_addr_thumb (address, orig));
2671 if ((GET_CODE (orig) == LABEL_REF
2672 || (GET_CODE (orig) == SYMBOL_REF &&
2673 SYMBOL_REF_LOCAL_P (orig)))
2674 && NEED_GOT_RELOC)
2675 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2676 else
2678 pic_ref = gen_rtx_MEM (Pmode,
2679 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2680 address));
2681 RTX_UNCHANGING_P (pic_ref) = 1;
2684 insn = emit_move_insn (reg, pic_ref);
2685 #endif
2686 current_function_uses_pic_offset_table = 1;
2687 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2688 by the loop pass. */
2689 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2690 REG_NOTES (insn));
2691 return reg;
2693 else if (GET_CODE (orig) == CONST)
2695 rtx base, offset;
2697 if (GET_CODE (XEXP (orig, 0)) == PLUS
2698 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2699 return orig;
2701 if (reg == 0)
2703 if (no_new_pseudos)
2704 abort ();
2705 else
2706 reg = gen_reg_rtx (Pmode);
2709 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2711 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2712 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2713 base == reg ? 0 : reg);
2715 else
2716 abort ();
2718 if (GET_CODE (offset) == CONST_INT)
2720 /* The base register doesn't really matter; we only want to
2721 test the index for the appropriate mode. */
2722 if (!arm_legitimate_index_p (mode, offset, 0))
2724 if (!no_new_pseudos)
2725 offset = force_reg (Pmode, offset);
2726 else
2727 abort ();
2730 if (GET_CODE (offset) == CONST_INT)
2731 return plus_constant (base, INTVAL (offset));
2734 if (GET_MODE_SIZE (mode) > 4
2735 && (GET_MODE_CLASS (mode) == MODE_INT
2736 || TARGET_SOFT_FLOAT))
2738 emit_insn (gen_addsi3 (reg, base, offset));
2739 return reg;
2742 return gen_rtx_PLUS (Pmode, base, offset);
2745 return orig;
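/* Illustrative sketch (not from the original source) of what the
   SYMBOL_REF case above emits for a global symbol under -fPIC:

	ldr	rT, .LCn	@ GOT offset of the symbol, from the pool
	ldr	rX, [sl, rT]	@ sl is the PIC base; load the GOT entry
				@ to obtain the symbol's address

   For LABEL_REFs and local symbols (when NEED_GOT_RELOC holds) the
   second load is omitted; the offset is simply added to the PIC base.  */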
2748 /* Generate code to load the PIC register. PROLOGUE is true if
2749 called from arm_expand_prologue (in which case we want the
2750 generated insns at the start of the function); false if called
2751 by an exception receiver that needs the PIC register reloaded
2752 (in which case the insns are just dumped at the current location). */
2753 void
2754 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2756 #ifndef AOF_ASSEMBLER
2757 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2758 rtx global_offset_table;
2760 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2761 return;
2763 if (!flag_pic)
2764 abort ();
2766 start_sequence ();
2767 l1 = gen_label_rtx ();
2769 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2770 /* On the ARM the PC register contains 'dot + 8' at the time of the
2771 addition; on the Thumb it is 'dot + 4'. */
2772 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2773 if (GOT_PCREL)
2774 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2775 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2776 else
2777 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2779 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2781 if (TARGET_ARM)
2783 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2784 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2786 else
2788 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2789 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2792 seq = get_insns ();
2793 end_sequence ();
2794 if (prologue)
2795 emit_insn_after (seq, get_insns ());
2796 else
2797 emit_insn (seq);
2799 /* Need to emit this whether or not we obey regdecls,
2800 since setjmp/longjmp can cause life info to screw up. */
2801 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2802 #endif /* AOF_ASSEMBLER */
2805 /* Return nonzero if X is valid as an ARM state addressing register. */
2806 static int
2807 arm_address_register_rtx_p (rtx x, int strict_p)
2809 int regno;
2811 if (GET_CODE (x) != REG)
2812 return 0;
2814 regno = REGNO (x);
2816 if (strict_p)
2817 return ARM_REGNO_OK_FOR_BASE_P (regno);
2819 return (regno <= LAST_ARM_REGNUM
2820 || regno >= FIRST_PSEUDO_REGISTER
2821 || regno == FRAME_POINTER_REGNUM
2822 || regno == ARG_POINTER_REGNUM);
2825 /* Return nonzero if X is a valid ARM state address operand. */
2827 arm_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
2829 if (arm_address_register_rtx_p (x, strict_p))
2830 return 1;
2832 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2833 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2835 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2836 && GET_MODE_SIZE (mode) <= 4
2837 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2838 && GET_CODE (XEXP (x, 1)) == PLUS
2839 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2840 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), strict_p);
2842 /* After reload, constants split out into minipools will have addresses
2843 from a LABEL_REF. */
2844 else if (reload_completed
2845 && (GET_CODE (x) == LABEL_REF
2846 || (GET_CODE (x) == CONST
2847 && GET_CODE (XEXP (x, 0)) == PLUS
2848 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2849 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2850 return 1;
2852 else if (mode == TImode)
2853 return 0;
2855 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2857 if (GET_CODE (x) == PLUS
2858 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2859 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2861 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2863 if (val == 4 || val == -4 || val == -8)
2864 return 1;
2868 else if (TARGET_HARD_FLOAT && TARGET_VFP && mode == DFmode)
2870 if (GET_CODE (x) == PLUS
2871 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2872 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2874 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2876 /* ??? Valid ARM offsets are a subset of VFP offsets.
2877 For now only allow this subset. Proper fix is to add an
2878 additional memory constraint for arm address modes.
2879 Alternatively allow full vfp addressing and let
2880 output_move_double fix it up with a sub-optimal sequence. */
2881 if (val == 4 || val == -4 || val == -8)
2882 return 1;
2886 else if (GET_CODE (x) == PLUS)
2888 rtx xop0 = XEXP (x, 0);
2889 rtx xop1 = XEXP (x, 1);
2891 return ((arm_address_register_rtx_p (xop0, strict_p)
2892 && arm_legitimate_index_p (mode, xop1, strict_p))
2893 || (arm_address_register_rtx_p (xop1, strict_p)
2894 && arm_legitimate_index_p (mode, xop0, strict_p)));
2897 #if 0
2898 /* Reload currently can't handle MINUS, so disable this for now */
2899 else if (GET_CODE (x) == MINUS)
2901 rtx xop0 = XEXP (x, 0);
2902 rtx xop1 = XEXP (x, 1);
2904 return (arm_address_register_rtx_p (xop0, strict_p)
2905 && arm_legitimate_index_p (mode, xop1, strict_p));
2907 #endif
2909 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2910 && GET_CODE (x) == SYMBOL_REF
2911 && CONSTANT_POOL_ADDRESS_P (x)
2912 && ! (flag_pic
2913 && symbol_mentioned_p (get_pool_constant (x))))
2914 return 1;
2916 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2917 && (GET_MODE_SIZE (mode) <= 4)
2918 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2919 return 1;
2921 return 0;
2924 /* Return nonzero if INDEX is valid for an address index operand in
2925 ARM state. */
2926 static int
2927 arm_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
2929 HOST_WIDE_INT range;
2930 enum rtx_code code = GET_CODE (index);
2932 if (TARGET_HARD_FLOAT && TARGET_FPA && GET_MODE_CLASS (mode) == MODE_FLOAT)
2933 return (code == CONST_INT && INTVAL (index) < 1024
2934 && INTVAL (index) > -1024
2935 && (INTVAL (index) & 3) == 0);
2937 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
2938 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2939 return (code == CONST_INT
2940 && INTVAL (index) < 255
2941 && INTVAL (index) > -255);
2943 if (arm_address_register_rtx_p (index, strict_p)
2944 && GET_MODE_SIZE (mode) <= 4)
2945 return 1;
2947 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2948 return (code == CONST_INT
2949 && INTVAL (index) < 256
2950 && INTVAL (index) > -256);
2952 /* XXX What about ldrsb? */
2953 if (GET_MODE_SIZE (mode) <= 4 && code == MULT
2954 && (!arm_arch4 || (mode) != HImode))
2956 rtx xiop0 = XEXP (index, 0);
2957 rtx xiop1 = XEXP (index, 1);
2959 return ((arm_address_register_rtx_p (xiop0, strict_p)
2960 && power_of_two_operand (xiop1, SImode))
2961 || (arm_address_register_rtx_p (xiop1, strict_p)
2962 && power_of_two_operand (xiop0, SImode)));
2965 if (GET_MODE_SIZE (mode) <= 4
2966 && (code == LSHIFTRT || code == ASHIFTRT
2967 || code == ASHIFT || code == ROTATERT)
2968 && (!arm_arch4 || (mode) != HImode))
2970 rtx op = XEXP (index, 1);
2972 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2973 && GET_CODE (op) == CONST_INT
2974 && INTVAL (op) > 0
2975 && INTVAL (op) <= 31);
2978 /* XXX For ARM v4 we may be doing a sign-extend operation during the
2979 load, but that has a restricted addressing range and we are unable
2980 to tell here whether that is the case. To be safe we restrict all
2981 loads to that range. */
2982 if (arm_arch4)
2983 range = (mode == HImode || mode == QImode) ? 256 : 4096;
2984 else
2985 range = (mode == HImode) ? 4095 : 4096;
2987 return (code == CONST_INT
2988 && INTVAL (index) < range
2989 && INTVAL (index) > -range);
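/* Illustrative examples (not from the original source) of SImode index
   operands accepted above:

	[r0, r1]		register index
	[r0, #4095]		immediate within +/-4095
	[r0, r1, lsl #2]	index scaled by a power of two
				(a MULT by 4 in the RTL)

   whereas an HImode access on arm_arch4 is limited to the +/-255
   immediate range of ldrh/strh.  */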
2992 /* Return nonzero if X is valid as a Thumb state base register. */
2993 static int
2994 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
2996 int regno;
2998 if (GET_CODE (x) != REG)
2999 return 0;
3001 regno = REGNO (x);
3003 if (strict_p)
3004 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3006 return (regno <= LAST_LO_REGNUM
3007 || regno > LAST_VIRTUAL_REGISTER
3008 || regno == FRAME_POINTER_REGNUM
3009 || (GET_MODE_SIZE (mode) >= 4
3010 && (regno == STACK_POINTER_REGNUM
3011 || regno >= FIRST_PSEUDO_REGISTER
3012 || x == hard_frame_pointer_rtx
3013 || x == arg_pointer_rtx)));
3016 /* Return nonzero if x is a legitimate index register. This is the case
3017 for any base register that can access a QImode object. */
3018 inline static int
3019 thumb_index_register_rtx_p (rtx x, int strict_p)
3021 return thumb_base_register_rtx_p (x, QImode, strict_p);
3024 /* Return nonzero if x is a legitimate Thumb-state address.
3026 The AP may be eliminated to either the SP or the FP, so we use the
3027 least common denominator, e.g. SImode, and offsets from 0 to 64.
3029 ??? Verify whether the above is the right approach.
3031 ??? Also, the FP may be eliminated to the SP, so perhaps that
3032 needs special handling also.
3034 ??? Look at how the mips16 port solves this problem. It probably uses
3035 better ways to solve some of these problems.
3037 Although it is not incorrect, we don't accept QImode and HImode
3038 addresses based on the frame pointer or arg pointer until the
3039 reload pass starts. This is so that eliminating such addresses
3040 into stack based ones won't produce impossible code. */
3042 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3044 /* ??? Not clear if this is right. Experiment. */
3045 if (GET_MODE_SIZE (mode) < 4
3046 && !(reload_in_progress || reload_completed)
3047 && (reg_mentioned_p (frame_pointer_rtx, x)
3048 || reg_mentioned_p (arg_pointer_rtx, x)
3049 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3050 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3051 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3052 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3053 return 0;
3055 /* Accept any base register. SP only in SImode or larger. */
3056 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3057 return 1;
3059 /* This is PC relative data before arm_reorg runs. */
3060 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3061 && GET_CODE (x) == SYMBOL_REF
3062 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3063 return 1;
3065 /* This is PC relative data after arm_reorg runs. */
3066 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3067 && (GET_CODE (x) == LABEL_REF
3068 || (GET_CODE (x) == CONST
3069 && GET_CODE (XEXP (x, 0)) == PLUS
3070 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3071 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3072 return 1;
3074 /* Post-inc indexing only supported for SImode and larger. */
3075 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3076 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3077 return 1;
3079 else if (GET_CODE (x) == PLUS)
3081 /* REG+REG address can be any two index registers. */
3082 /* We disallow FRAME+REG addressing since we know that FRAME
3083 will be replaced with STACK, and SP relative addressing only
3084 permits SP+OFFSET. */
3085 if (GET_MODE_SIZE (mode) <= 4
3086 && XEXP (x, 0) != frame_pointer_rtx
3087 && XEXP (x, 1) != frame_pointer_rtx
3088 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3089 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3090 return 1;
3092 /* REG+const has 5-7 bit offset for non-SP registers. */
3093 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3094 || XEXP (x, 0) == arg_pointer_rtx)
3095 && GET_CODE (XEXP (x, 1)) == CONST_INT
3096 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3097 return 1;
3099 /* REG+const has 10 bit offset for SP, but only SImode and
3100 larger is supported. */
3101 /* ??? Should probably check for DI/DFmode overflow here
3102 just like GO_IF_LEGITIMATE_OFFSET does. */
3103 else if (GET_CODE (XEXP (x, 0)) == REG
3104 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3105 && GET_MODE_SIZE (mode) >= 4
3106 && GET_CODE (XEXP (x, 1)) == CONST_INT
3107 && INTVAL (XEXP (x, 1)) >= 0
3108 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3109 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3110 return 1;
3112 else if (GET_CODE (XEXP (x, 0)) == REG
3113 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3114 && GET_MODE_SIZE (mode) >= 4
3115 && GET_CODE (XEXP (x, 1)) == CONST_INT
3116 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3117 return 1;
3120 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3121 && GET_MODE_SIZE (mode) == 4
3122 && GET_CODE (x) == SYMBOL_REF
3123 && CONSTANT_POOL_ADDRESS_P (x)
3124 && !(flag_pic
3125 && symbol_mentioned_p (get_pool_constant (x))))
3126 return 1;
3128 return 0;
3131 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3132 instruction of mode MODE. */
3134 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3136 switch (GET_MODE_SIZE (mode))
3138 case 1:
3139 return val >= 0 && val < 32;
3141 case 2:
3142 return val >= 0 && val < 64 && (val & 1) == 0;
3144 default:
3145 return (val >= 0
3146 && (val + GET_MODE_SIZE (mode)) <= 128
3147 && (val & 3) == 0);
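/* In other words (illustrative summary): byte accesses allow offsets
   0-31, halfword accesses 0-62 in steps of 2, and word (or larger)
   accesses 0-124 in steps of 4 -- matching the scaled 5-bit offset
   fields of the Thumb ldrb/ldrh/ldr encodings.  */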
3151 /* Try machine-dependent ways of modifying an illegitimate address
3152 to be legitimate. If we find one, return the new, valid address. */
3154 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3156 if (GET_CODE (x) == PLUS)
3158 rtx xop0 = XEXP (x, 0);
3159 rtx xop1 = XEXP (x, 1);
3161 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3162 xop0 = force_reg (SImode, xop0);
3164 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3165 xop1 = force_reg (SImode, xop1);
3167 if (ARM_BASE_REGISTER_RTX_P (xop0)
3168 && GET_CODE (xop1) == CONST_INT)
3170 HOST_WIDE_INT n, low_n;
3171 rtx base_reg, val;
3172 n = INTVAL (xop1);
3174 /* VFP addressing modes actually allow greater offsets, but for
3175 now we just stick with the lowest common denominator. */
3176 if (mode == DImode
3177 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3179 low_n = n & 0x0f;
3180 n &= ~0x0f;
3181 if (low_n > 4)
3183 n += 16;
3184 low_n -= 16;
3187 else
3189 low_n = ((mode) == TImode ? 0
3190 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3191 n -= low_n;
3194 base_reg = gen_reg_rtx (SImode);
3195 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3196 GEN_INT (n)), NULL_RTX);
3197 emit_move_insn (base_reg, val);
3198 x = (low_n == 0 ? base_reg
3199 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3201 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3202 x = gen_rtx_PLUS (SImode, xop0, xop1);
3205 /* XXX We don't allow MINUS any more -- see comment in
3206 arm_legitimate_address_p (). */
3207 else if (GET_CODE (x) == MINUS)
3209 rtx xop0 = XEXP (x, 0);
3210 rtx xop1 = XEXP (x, 1);
3212 if (CONSTANT_P (xop0))
3213 xop0 = force_reg (SImode, xop0);
3215 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3216 xop1 = force_reg (SImode, xop1);
3218 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3219 x = gen_rtx_MINUS (SImode, xop0, xop1);
3222 if (flag_pic)
3224 /* We need to find and carefully transform any SYMBOL and LABEL
3225 references; so go back to the original address expression. */
3226 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3228 if (new_x != orig_x)
3229 x = new_x;
3232 return x;
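/* Worked example (illustrative): an SImode reference to [r0, #0x1234]
   has an out-of-range offset, so the code above splits it as
   n = 0x1000, low_n = 0x234 and emits

	add	rT, r0, #4096
	...	[rT, #0x234]

   where #4096 is a valid data-processing immediate and #0x234 lies
   within the +/-4095 load/store offset range.  */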
3236 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3237 to be legitimate. If we find one, return the new, valid address. */
3239 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3241 if (GET_CODE (x) == PLUS
3242 && GET_CODE (XEXP (x, 1)) == CONST_INT
3243 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3244 || INTVAL (XEXP (x, 1)) < 0))
3246 rtx xop0 = XEXP (x, 0);
3247 rtx xop1 = XEXP (x, 1);
3248 HOST_WIDE_INT offset = INTVAL (xop1);
3250 /* Try and fold the offset into a biasing of the base register and
3251 then offsetting that. Don't do this when optimizing for space
3252 since it can cause too many CSEs. */
3253 if (optimize_size && offset >= 0
3254 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3256 HOST_WIDE_INT delta;
3258 if (offset >= 256)
3259 delta = offset - (256 - GET_MODE_SIZE (mode));
3260 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3261 delta = 31 * GET_MODE_SIZE (mode);
3262 else
3263 delta = offset & (~31 * GET_MODE_SIZE (mode));
3265 xop0 = force_operand (plus_constant (xop0, offset - delta),
3266 NULL_RTX);
3267 x = plus_constant (xop0, delta);
3269 else if (offset < 0 && offset > -256)
3270 /* Small negative offsets are best done with a subtract before the
3271 dereference; forcing these into a register normally takes two
3272 instructions. */
3273 x = force_operand (x, NULL_RTX);
3274 else
3276 /* For the remaining cases, force the constant into a register. */
3277 xop1 = force_reg (SImode, xop1);
3278 x = gen_rtx_PLUS (SImode, xop0, xop1);
3281 else if (GET_CODE (x) == PLUS
3282 && s_register_operand (XEXP (x, 1), SImode)
3283 && !s_register_operand (XEXP (x, 0), SImode))
3285 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3287 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3290 if (flag_pic)
3292 /* We need to find and carefully transform any SYMBOL and LABEL
3293 references; so go back to the original address expression. */
3294 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3296 if (new_x != orig_x)
3297 x = new_x;
3300 return x;
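/* Worked example (illustrative): when optimizing for size, an SImode
   reference to [r0, #300] exceeds the 0-124 word-offset range, so with
   delta = 300 - 252 = 48 the code above rebases it roughly as

	add	rT, r0, #252
	ldr	rX, [rT, #48]

   leaving both constants within their immediate fields.  */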
3305 #define REG_OR_SUBREG_REG(X) \
3306 (GET_CODE (X) == REG \
3307 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3309 #define REG_OR_SUBREG_RTX(X) \
3310 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3312 #ifndef COSTS_N_INSNS
3313 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3314 #endif
3315 static inline int
3316 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3318 enum machine_mode mode = GET_MODE (x);
3320 switch (code)
3322 case ASHIFT:
3323 case ASHIFTRT:
3324 case LSHIFTRT:
3325 case ROTATERT:
3326 case PLUS:
3327 case MINUS:
3328 case COMPARE:
3329 case NEG:
3330 case NOT:
3331 return COSTS_N_INSNS (1);
3333 case MULT:
3334 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3336 int cycles = 0;
3337 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3339 while (i)
3341 i >>= 2;
3342 cycles++;
3344 return COSTS_N_INSNS (2) + cycles;
3346 return COSTS_N_INSNS (1) + 16;
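/* For example (illustrative): a multiply by the constant 0x55 drains
   the value in four 2-bit steps, costing COSTS_N_INSNS (2) + 4, while
   a multiply by a register operand gets the pessimistic
   COSTS_N_INSNS (1) + 16 above.  */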
3348 case SET:
3349 return (COSTS_N_INSNS (1)
3350 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3351 + (GET_CODE (SET_DEST (x)) == MEM)));
3353 case CONST_INT:
3354 if (outer == SET)
3356 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3357 return 0;
3358 if (thumb_shiftable_const (INTVAL (x)))
3359 return COSTS_N_INSNS (2);
3360 return COSTS_N_INSNS (3);
3362 else if ((outer == PLUS || outer == COMPARE)
3363 && INTVAL (x) < 256 && INTVAL (x) > -256)
3364 return 0;
3365 else if (outer == AND
3366 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3367 return COSTS_N_INSNS (1);
3368 else if (outer == ASHIFT || outer == ASHIFTRT
3369 || outer == LSHIFTRT)
3370 return 0;
3371 return COSTS_N_INSNS (2);
3373 case CONST:
3374 case CONST_DOUBLE:
3375 case LABEL_REF:
3376 case SYMBOL_REF:
3377 return COSTS_N_INSNS (3);
3379 case UDIV:
3380 case UMOD:
3381 case DIV:
3382 case MOD:
3383 return 100;
3385 case TRUNCATE:
3386 return 99;
3388 case AND:
3389 case XOR:
3390 case IOR:
3391 /* XXX guess. */
3392 return 8;
3394 case ADDRESSOF:
3395 case MEM:
3396 /* XXX another guess. */
3397 /* Memory costs quite a lot for the first word, but subsequent words
3398 load at the equivalent of a single insn each. */
3399 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3400 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3401 ? 4 : 0));
3403 case IF_THEN_ELSE:
3404 /* XXX a guess. */
3405 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3406 return 14;
3407 return 2;
3409 case ZERO_EXTEND:
3410 /* XXX still guessing. */
3411 switch (GET_MODE (XEXP (x, 0)))
3413 case QImode:
3414 return (1 + (mode == DImode ? 4 : 0)
3415 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3417 case HImode:
3418 return (4 + (mode == DImode ? 4 : 0)
3419 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3421 case SImode:
3422 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3424 default:
3425 return 99;
3428 default:
3429 return 99;
3434 /* Worker routine for arm_rtx_costs. */
3435 static inline int
3436 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3438 enum machine_mode mode = GET_MODE (x);
3439 enum rtx_code subcode;
3440 int extra_cost;
3442 switch (code)
3444 case MEM:
3445 /* Memory costs quite a lot for the first word, but subsequent words
3446 load at the equivalent of a single insn each. */
3447 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3448 + (GET_CODE (x) == SYMBOL_REF
3449 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3451 case DIV:
3452 case MOD:
3453 case UDIV:
3454 case UMOD:
3455 return optimize_size ? COSTS_N_INSNS (2) : 100;
3457 case ROTATE:
3458 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3459 return 4;
3460 /* Fall through */
3461 case ROTATERT:
3462 if (mode != SImode)
3463 return 8;
3464 /* Fall through */
3465 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3466 if (mode == DImode)
3467 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3468 + ((GET_CODE (XEXP (x, 0)) == REG
3469 || (GET_CODE (XEXP (x, 0)) == SUBREG
3470 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3471 ? 0 : 8));
3472 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3473 || (GET_CODE (XEXP (x, 0)) == SUBREG
3474 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3475 ? 0 : 4)
3476 + ((GET_CODE (XEXP (x, 1)) == REG
3477 || (GET_CODE (XEXP (x, 1)) == SUBREG
3478 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3479 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3480 ? 0 : 4));
3482 case MINUS:
3483 if (mode == DImode)
3484 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3485 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3486 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3487 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3488 ? 0 : 8));
3490 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3491 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3492 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3493 && arm_const_double_rtx (XEXP (x, 1))))
3494 ? 0 : 8)
3495 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3496 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3497 && arm_const_double_rtx (XEXP (x, 0))))
3498 ? 0 : 8));
3500 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3501 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3502 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3503 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3504 || subcode == ASHIFTRT || subcode == LSHIFTRT
3505 || subcode == ROTATE || subcode == ROTATERT
3506 || (subcode == MULT
3507 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3508 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3509 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3510 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3511 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3512 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3513 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3514 return 1;
3515 /* Fall through */
3517 case PLUS:
3518 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3519 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3520 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3521 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3522 && arm_const_double_rtx (XEXP (x, 1))))
3523 ? 0 : 8));
3525 /* Fall through */
3526 case AND: case XOR: case IOR:
3527 extra_cost = 0;
3529 /* Normally the frame registers will be split into reg+const during
3530 reload, so it is a bad idea to combine them with other instructions,
3531 since then they might not be moved outside of loops. As a compromise
3532 we allow integration with ops that have a constant as their second
3533 operand. */
3534 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3535 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3536 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3537 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3538 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3539 extra_cost = 4;
3541 if (mode == DImode)
3542 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3543 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3544 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3545 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3546 ? 0 : 8));
3548 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3549 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3550 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3551 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3552 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3553 ? 0 : 4));
3555 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3556 return (1 + extra_cost
3557 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3558 || subcode == LSHIFTRT || subcode == ASHIFTRT
3559 || subcode == ROTATE || subcode == ROTATERT
3560 || (subcode == MULT
3561 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3562 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3563 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3564 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3565 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3566 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3567 ? 0 : 4));
3569 return 8;
3571 case MULT:
3572 /* This should have been handled by the CPU specific routines. */
3573 abort ();
3575 case TRUNCATE:
3576 if (arm_arch3m && mode == SImode
3577 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3578 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3579 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3580 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3581 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3582 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3583 return 8;
3584 return 99;
3586 case NEG:
3587 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3588 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3589 /* Fall through */
3590 case NOT:
3591 if (mode == DImode)
3592 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3594 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3596 case IF_THEN_ELSE:
3597 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3598 return 14;
3599 return 2;
3601 case COMPARE:
3602 return 1;
3604 case ABS:
3605 return 4 + (mode == DImode ? 4 : 0);
3607 case SIGN_EXTEND:
3608 if (GET_MODE (XEXP (x, 0)) == QImode)
3609 return (4 + (mode == DImode ? 4 : 0)
3610 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3611 /* Fall through */
3612 case ZERO_EXTEND:
3613 switch (GET_MODE (XEXP (x, 0)))
3615 case QImode:
3616 return (1 + (mode == DImode ? 4 : 0)
3617 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3619 case HImode:
3620 return (4 + (mode == DImode ? 4 : 0)
3621 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3623 case SImode:
3624 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3626 case V8QImode:
3627 case V4HImode:
3628 case V2SImode:
3629 case V4QImode:
3630 case V2HImode:
3631 return 1;
3633 default:
3634 break;
3636 abort ();
3638 case CONST_INT:
3639 if (const_ok_for_arm (INTVAL (x)))
3640 return outer == SET ? 2 : -1;
3641 else if (outer == AND
3642 && const_ok_for_arm (~INTVAL (x)))
3643 return -1;
3644 else if ((outer == COMPARE
3645 || outer == PLUS || outer == MINUS)
3646 && const_ok_for_arm (-INTVAL (x)))
3647 return -1;
3648 else
3649 return 5;
3651 case CONST:
3652 case LABEL_REF:
3653 case SYMBOL_REF:
3654 return 6;
3656 case CONST_DOUBLE:
3657 if (arm_const_double_rtx (x))
3658 return outer == SET ? 2 : -1;
3659 else if ((outer == COMPARE || outer == PLUS)
3660 && neg_const_double_rtx_ok_for_fpa (x))
3661 return -1;
3662 return 7;
3664 default:
3665 return 99;
3669 /* RTX costs for cores with a slow MUL implementation. */
3671 static bool
3672 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3674 enum machine_mode mode = GET_MODE (x);
3676 if (TARGET_THUMB)
3678 *total = thumb_rtx_costs (x, code, outer_code);
3679 return true;
3682 switch (code)
3684 case MULT:
3685 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3686 || mode == DImode)
3688 *total = 30;
3689 return true;
3692 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3694 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3695 & (unsigned HOST_WIDE_INT) 0xffffffff);
3696 int cost, const_ok = const_ok_for_arm (i);
3697 int j, booth_unit_size;
3699 /* Tune as appropriate. */
3700 cost = const_ok ? 4 : 8;
3701 booth_unit_size = 2;
3702 for (j = 0; i && j < 32; j += booth_unit_size)
3704 i >>= booth_unit_size;
3705 cost += 2;
3708 *total = cost;
3709 return true;
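/* For example (illustrative): multiplying by 0xff (a valid immediate,
   base cost 4) drains the constant in four 2-bit Booth steps, giving
   *total = 4 + 4 * 2 = 12, while 0x101 (not a valid immediate, base
   cost 8) takes five steps: 8 + 5 * 2 = 18.  */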
3712 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3713 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3714 return true;
3716 default:
3717 *total = arm_rtx_costs_1 (x, code, outer_code);
3718 return true;
3723 /* RTX cost for cores with a fast multiply unit (M variants). */
3725 static bool
3726 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3728 enum machine_mode mode = GET_MODE (x);
3730 if (TARGET_THUMB)
3732 *total = thumb_rtx_costs (x, code, outer_code);
3733 return true;
3736 switch (code)
3738 case MULT:
3739 /* There is no point basing this on the tuning, since it is always the
3740 fast variant if it exists at all. */
3741 if (mode == DImode
3742 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3743 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3744 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3746 *total = 8;
3747 return true;
3751 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3752 || mode == DImode)
3754 *total = 30;
3755 return true;
3758 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3760 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3761 & (unsigned HOST_WIDE_INT) 0xffffffff);
3762 int cost, const_ok = const_ok_for_arm (i);
3763 int j, booth_unit_size;
3765 /* Tune as appropriate. */
3766 cost = const_ok ? 4 : 8;
3767 booth_unit_size = 8;
3768 for (j = 0; i && j < 32; j += booth_unit_size)
3770 i >>= booth_unit_size;
3771 cost += 2;
3774 *total = cost;
3775 return true;
3778 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3779 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3780 return true;
3782 default:
3783 *total = arm_rtx_costs_1 (x, code, outer_code);
3784 return true;
3789 /* RTX cost for XScale CPUs. */
3791 static bool
3792 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
3794 enum machine_mode mode = GET_MODE (x);
3796 if (TARGET_THUMB)
3798 *total = thumb_rtx_costs (x, code, outer_code);
3799 return true;
3802 switch (code)
3804 case MULT:
3805 /* There is no point basing this on the tuning, since it is always the
3806 fast variant if it exists at all. */
3807 if (mode == DImode
3808 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3809 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3810 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3812 *total = 8;
3813 return true;
3817 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3818 || mode == DImode)
3820 *total = 30;
3821 return true;
3824 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3826 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3827 & (unsigned HOST_WIDE_INT) 0xffffffff);
3828 int cost, const_ok = const_ok_for_arm (i);
3829 unsigned HOST_WIDE_INT masked_const;
3831 /* The cost will be related to two insns.
3832 First a load of the constant (MOV or LDR), then a multiply. */
3833 cost = 2;
3834 if (! const_ok)
3835 cost += 1; /* LDR is probably more expensive because
3836 of longer result latency. */
3837 masked_const = i & 0xffff8000;
3838 if (masked_const != 0 && masked_const != 0xffff8000)
3840 masked_const = i & 0xf8000000;
3841 if (masked_const == 0 || masked_const == 0xf8000000)
3842 cost += 1;
3843 else
3844 cost += 2;
3846 *total = cost;
3847 return true;
3850 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3851 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3852 return true;
3854 default:
3855 *total = arm_rtx_costs_1 (x, code, outer_code);
3856 return true;
3861 /* RTX costs for 9e (and later) cores. */
3863 static bool
3864 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
3866 enum machine_mode mode = GET_MODE (x);
3867 int nonreg_cost;
3868 int cost;
3870 if (TARGET_THUMB)
3872 switch (code)
3874 case MULT:
3875 *total = COSTS_N_INSNS (3);
3876 return true;
3878 default:
3879 *total = thumb_rtx_costs (x, code, outer_code);
3880 return true;
3884 switch (code)
3886 case MULT:
3887 /* There is no point basing this on the tuning, since it is always the
3888 fast variant if it exists at all. */
3889 if (mode == DImode
3890 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3891 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3892 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3894 *total = 3;
3895 return true;
3899 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3901 *total = 30;
3902 return true;
3904 if (mode == DImode)
3906 cost = 7;
3907 nonreg_cost = 8;
3909 else
3911 cost = 2;
3912 nonreg_cost = 4;
3916 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
3917 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
3918 return true;
3920 default:
3921 *total = arm_rtx_costs_1 (x, code, outer_code);
3922 return true;
3925 /* All address computations that can be done are free, but rtx cost returns
3926 the same for practically all of them. So we weight the different types
3927 of address here in the order (most preferred first):
3928 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
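/* Costs implied by the code below (a reader's note): (post_inc reg)
   -> 0; (plus reg (mult reg (const_int 4))) -> 3, since MULT is an
   arithmetic rtx; the canonical (plus reg (const_int 4)) -> 4, as
   both operands are objects; a bare REG -> 6; MEM, LABEL_REF and
   SYMBOL_REF -> 10.  */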
3929 static inline int
3930 arm_arm_address_cost (rtx x)
3932 enum rtx_code c = GET_CODE (x);
3934 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3935 return 0;
3936 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3937 return 10;
3939 if (c == PLUS || c == MINUS)
3941 char cl0 = GET_RTX_CLASS (GET_CODE (XEXP (x, 0)));
3942 char cl1 = GET_RTX_CLASS (GET_CODE (XEXP (x, 1)));
3944 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3945 return 2;
3947 if (cl0 == '2' || cl0 == 'c' || cl1 == '2' || cl1 == 'c')
3948 return 3;
3950 return 4;
3953 return 6;
3956 static inline int
3957 arm_thumb_address_cost (rtx x)
3959 enum rtx_code c = GET_CODE (x);
3961 if (c == REG)
3962 return 1;
3963 if (c == PLUS
3964 && GET_CODE (XEXP (x, 0)) == REG
3965 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3966 return 1;
3968 return 2;
3971 static int
3972 arm_address_cost (rtx x)
3974 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
3977 static int
3978 arm_use_dfa_pipeline_interface (void)
3980 return true;
3983 static int
3984 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3986 rtx i_pat, d_pat;
3988 /* Some true dependencies can have a higher cost depending
3989 on precisely how certain input operands are used. */
3990 if (arm_tune_xscale
3991 && REG_NOTE_KIND (link) == 0
3992 && recog_memoized (insn) >= 0
3993 && recog_memoized (dep) >= 0)
3995 int shift_opnum = get_attr_shift (insn);
3996 enum attr_type attr_type = get_attr_type (dep);
3998 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
3999 operand for INSN. If we have a shifted input operand and the
4000 instruction we depend on is another ALU instruction, then we may
4001 have to account for an additional stall. */
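/* For example (illustrative): on XScale the sequence
       mov r2, r3
       add r0, r1, r2, lsl #4
   stalls for an extra cycle because the shifter needs R2 earlier
   in the pipeline than a plain ALU operand would be needed.  */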
4002 if (shift_opnum != 0
4003 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4005 rtx shifted_operand;
4006 int opno;
4008 /* Get the shifted operand. */
4009 extract_insn (insn);
4010 shifted_operand = recog_data.operand[shift_opnum];
4012 /* Iterate over all the operands in DEP. If we write an operand
4013 that overlaps with SHIFTED_OPERAND, then we have to increase the
4014 cost of this dependency. */
4015 extract_insn (dep);
4016 preprocess_constraints ();
4017 for (opno = 0; opno < recog_data.n_operands; opno++)
4019 /* We can ignore strict inputs. */
4020 if (recog_data.operand_type[opno] == OP_IN)
4021 continue;
4023 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4024 shifted_operand))
4025 return 2;
4030 /* XXX This is not strictly true for the FPA. */
4031 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4032 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4033 return 0;
4035 /* Call insns don't incur a stall, even if they follow a load. */
4036 if (REG_NOTE_KIND (link) == 0
4037 && GET_CODE (insn) == CALL_INSN)
4038 return 1;
4040 if ((i_pat = single_set (insn)) != NULL
4041 && GET_CODE (SET_SRC (i_pat)) == MEM
4042 && (d_pat = single_set (dep)) != NULL
4043 && GET_CODE (SET_DEST (d_pat)) == MEM)
4045 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4046 /* This is a load after a store; there is no conflict if the load reads
4047 from a cached area. Assume that loads from the stack, and from the
4048 constant pool are cached, and that others will miss. This is a
4049 hack. */
4051 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4052 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4053 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4054 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4055 return 1;
4058 return cost;
4061 static int fp_consts_inited = 0;
4063 /* Only zero is a valid immediate for VFP. All eight values are valid for FPA. */
4064 static const char * const strings_fp[8] =
4066 "0", "1", "2", "3",
4067 "4", "5", "0.5", "10"
4070 static REAL_VALUE_TYPE values_fp[8];
4072 static void
4073 init_fp_table (void)
4075 int i;
4076 REAL_VALUE_TYPE r;
4078 if (TARGET_VFP)
4079 fp_consts_inited = 1;
4080 else
4081 fp_consts_inited = 8;
4083 for (i = 0; i < fp_consts_inited; i++)
4085 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4086 values_fp[i] = r;
4090 /* Return TRUE if rtx X is a valid immediate FP constant. */
4092 arm_const_double_rtx (rtx x)
4094 REAL_VALUE_TYPE r;
4095 int i;
4097 if (!fp_consts_inited)
4098 init_fp_table ();
4100 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4101 if (REAL_VALUE_MINUS_ZERO (r))
4102 return 0;
4104 for (i = 0; i < fp_consts_inited; i++)
4105 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4106 return 1;
4108 return 0;
4111 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4113 neg_const_double_rtx_ok_for_fpa (rtx x)
4115 REAL_VALUE_TYPE r;
4116 int i;
4118 if (!fp_consts_inited)
4119 init_fp_table ();
4121 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4122 r = REAL_VALUE_NEGATE (r);
4123 if (REAL_VALUE_MINUS_ZERO (r))
4124 return 0;
4126 for (i = 0; i < 8; i++)
4127 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4128 return 1;
4130 return 0;
4133 /* Predicates for `match_operand' and `match_operator'. */
4135 /* s_register_operand is the same as register_operand, but it doesn't accept
4136 (SUBREG (MEM)...).
4138 This function exists because at the time it was put in it led to better
4139 code. SUBREG(MEM) always needs a reload in the places where
4140 s_register_operand is used, and this seemed to lead to excessive
4141 reloading. */
4143 s_register_operand (rtx op, enum machine_mode mode)
4145 if (GET_MODE (op) != mode && mode != VOIDmode)
4146 return 0;
4148 if (GET_CODE (op) == SUBREG)
4149 op = SUBREG_REG (op);
4151 /* We don't consider registers whose class is NO_REGS
4152 to be a register operand. */
4153 /* XXX might have to check for lo regs only for thumb ??? */
4154 return (GET_CODE (op) == REG
4155 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4156 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4159 /* A hard register operand (even before reload). */
4161 arm_hard_register_operand (rtx op, enum machine_mode mode)
4163 if (GET_MODE (op) != mode && mode != VOIDmode)
4164 return 0;
4166 return (GET_CODE (op) == REG
4167 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4170 /* An ARM register operand. */
4172 arm_general_register_operand (rtx op, enum machine_mode mode)
4174 if (GET_MODE (op) != mode && mode != VOIDmode)
4175 return 0;
4177 if (GET_CODE (op) == SUBREG)
4178 op = SUBREG_REG (op);
4180 return (GET_CODE (op) == REG
4181 && (REGNO (op) <= LAST_ARM_REGNUM
4182 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4185 /* Only accept reg, subreg(reg), const_int. */
4187 reg_or_int_operand (rtx op, enum machine_mode mode)
4189 if (GET_CODE (op) == CONST_INT)
4190 return 1;
4192 if (GET_MODE (op) != mode && mode != VOIDmode)
4193 return 0;
4195 if (GET_CODE (op) == SUBREG)
4196 op = SUBREG_REG (op);
4198 /* We don't consider registers whose class is NO_REGS
4199 to be a register operand. */
4200 return (GET_CODE (op) == REG
4201 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4202 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4205 /* Return 1 if OP is an item in memory, given that we are in reload. */
4207 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4209 int regno = true_regnum (op);
4211 return (!CONSTANT_P (op)
4212 && (regno == -1
4213 || (GET_CODE (op) == REG
4214 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4217 /* Return 1 if OP is a valid memory address, but not valid for a signed byte
4218 memory access (architecture V4).
4219 MODE is QImode if called when computing constraints, or VOIDmode when
4220 emitting patterns. In this latter case we cannot use memory_operand()
4221 because it will fail on badly formed MEMs, which is precisely what we are
4222 trying to catch. */
4224 bad_signed_byte_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4226 if (GET_CODE (op) != MEM)
4227 return 0;
4229 op = XEXP (op, 0);
4231 /* A sum of anything more complex than reg + reg or reg + const is bad. */
4232 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
4233 && (!s_register_operand (XEXP (op, 0), VOIDmode)
4234 || (!s_register_operand (XEXP (op, 1), VOIDmode)
4235 && GET_CODE (XEXP (op, 1)) != CONST_INT)))
4236 return 1;
4238 /* Big constants are also bad. */
4239 if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
4240 && (INTVAL (XEXP (op, 1)) > 0xff
4241 || -INTVAL (XEXP (op, 1)) > 0xff))
4242 return 1;
4244 /* Everything else is good, or will automatically be made so. */
4245 return 0;
4248 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4250 arm_rhs_operand (rtx op, enum machine_mode mode)
4252 return (s_register_operand (op, mode)
4253 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4256 /* Return TRUE for valid operands for the
4257 rhs of an ARM instruction, or a load. */
4259 arm_rhsm_operand (rtx op, enum machine_mode mode)
4261 return (s_register_operand (op, mode)
4262 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4263 || memory_operand (op, mode));
4266 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4267 constant that is valid when negated. */
4269 arm_add_operand (rtx op, enum machine_mode mode)
4271 if (TARGET_THUMB)
4272 return thumb_cmp_operand (op, mode);
4274 return (s_register_operand (op, mode)
4275 || (GET_CODE (op) == CONST_INT
4276 && (const_ok_for_arm (INTVAL (op))
4277 || const_ok_for_arm (-INTVAL (op)))));
4280 /* Return TRUE for valid ARM constants (or when valid if negated). */
4282 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4284 return (GET_CODE (op) == CONST_INT
4285 && (const_ok_for_arm (INTVAL (op))
4286 || const_ok_for_arm (-INTVAL (op))));
4290 arm_not_operand (rtx op, enum machine_mode mode)
4292 return (s_register_operand (op, mode)
4293 || (GET_CODE (op) == CONST_INT
4294 && (const_ok_for_arm (INTVAL (op))
4295 || const_ok_for_arm (~INTVAL (op)))));
4298 /* Return TRUE if the operand is a memory reference which contains an
4299 offsettable address. */
4301 offsettable_memory_operand (rtx op, enum machine_mode mode)
4303 if (mode == VOIDmode)
4304 mode = GET_MODE (op);
4306 return (mode == GET_MODE (op)
4307 && GET_CODE (op) == MEM
4308 && offsettable_address_p (reload_completed | reload_in_progress,
4309 mode, XEXP (op, 0)));
4312 /* Return TRUE if the operand is a memory reference which is, or can be
4313 made word aligned by adjusting the offset. */
4315 alignable_memory_operand (rtx op, enum machine_mode mode)
4317 rtx reg;
4319 if (mode == VOIDmode)
4320 mode = GET_MODE (op);
4322 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4323 return 0;
4325 op = XEXP (op, 0);
4327 return ((GET_CODE (reg = op) == REG
4328 || (GET_CODE (op) == SUBREG
4329 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4330 || (GET_CODE (op) == PLUS
4331 && GET_CODE (XEXP (op, 1)) == CONST_INT
4332 && (GET_CODE (reg = XEXP (op, 0)) == REG
4333 || (GET_CODE (XEXP (op, 0)) == SUBREG
4334 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4335 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4338 /* Similar to s_register_operand, but does not allow hard integer
4339 registers. */
4341 f_register_operand (rtx op, enum machine_mode mode)
4343 if (GET_MODE (op) != mode && mode != VOIDmode)
4344 return 0;
4346 if (GET_CODE (op) == SUBREG)
4347 op = SUBREG_REG (op);
4349 /* We don't consider registers whose class is NO_REGS
4350 to be a register operand. */
4351 return (GET_CODE (op) == REG
4352 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4353 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4356 /* Return TRUE for valid operands for the rhs of a floating point insn.
4357 Allows regs or certain consts on FPA, just regs for everything else. */
4359 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4361 if (s_register_operand (op, mode))
4362 return TRUE;
4364 if (GET_MODE (op) != mode && mode != VOIDmode)
4365 return FALSE;
4367 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4368 return arm_const_double_rtx (op);
4370 return FALSE;
4374 arm_float_add_operand (rtx op, enum machine_mode mode)
4376 if (s_register_operand (op, mode))
4377 return TRUE;
4379 if (GET_MODE (op) != mode && mode != VOIDmode)
4380 return FALSE;
4382 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4383 return (arm_const_double_rtx (op)
4384 || neg_const_double_rtx_ok_for_fpa (op));
4386 return FALSE;
4390 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4391 Depends on which FPU we are targeting. */
4394 arm_float_compare_operand (rtx op, enum machine_mode mode)
4396 if (TARGET_VFP)
4397 return vfp_compare_operand (op, mode);
4398 else
4399 return arm_float_rhs_operand (op, mode);
4403 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4405 cirrus_memory_offset (rtx op)
4407 /* Reject eliminable registers. */
4408 if (! (reload_in_progress || reload_completed)
4409 && ( reg_mentioned_p (frame_pointer_rtx, op)
4410 || reg_mentioned_p (arg_pointer_rtx, op)
4411 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4412 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4413 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4414 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4415 return 0;
4417 if (GET_CODE (op) == MEM)
4419 rtx ind;
4421 ind = XEXP (op, 0);
4423 /* Match: (mem (reg)). */
4424 if (GET_CODE (ind) == REG)
4425 return 1;
4427 /* Match:
4428 (mem (plus (reg)
4429 (const))). */
4430 if (GET_CODE (ind) == PLUS
4431 && GET_CODE (XEXP (ind, 0)) == REG
4432 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4433 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4434 return 1;
4437 return 0;
4440 /* Return nonzero if OP is a Cirrus or general register. */
4442 cirrus_register_operand (rtx op, enum machine_mode mode)
4444 if (GET_MODE (op) != mode && mode != VOIDmode)
4445 return FALSE;
4447 if (GET_CODE (op) == SUBREG)
4448 op = SUBREG_REG (op);
4450 return (GET_CODE (op) == REG
4451 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4452 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4455 /* Return nonzero if OP is a Cirrus FP register. */
4457 cirrus_fp_register (rtx op, enum machine_mode mode)
4459 if (GET_MODE (op) != mode && mode != VOIDmode)
4460 return FALSE;
4462 if (GET_CODE (op) == SUBREG)
4463 op = SUBREG_REG (op);
4465 return (GET_CODE (op) == REG
4466 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4467 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4470 /* Return nonzero if OP is a 6-bit constant (0..63). */
4472 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4474 return (GET_CODE (op) == CONST_INT
4475 && INTVAL (op) >= 0
4476 && INTVAL (op) < 64);
4480 /* Return TRUE if OP is a valid VFP memory address pattern.
4481 Copied from cirrus_memory_offset but with a restricted offset range. */
4484 vfp_mem_operand (rtx op)
4486 /* Reject eliminable registers. */
4488 if (! (reload_in_progress || reload_completed)
4489 && ( reg_mentioned_p (frame_pointer_rtx, op)
4490 || reg_mentioned_p (arg_pointer_rtx, op)
4491 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4492 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4493 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4494 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4495 return FALSE;
4497 /* Constants are converted into offsets from labels. */
4498 if (GET_CODE (op) == MEM)
4500 rtx ind;
4502 ind = XEXP (op, 0);
4504 if (reload_completed
4505 && (GET_CODE (ind) == LABEL_REF
4506 || (GET_CODE (ind) == CONST
4507 && GET_CODE (XEXP (ind, 0)) == PLUS
4508 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4509 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4510 return TRUE;
4512 /* Match: (mem (reg)). */
4513 if (GET_CODE (ind) == REG)
4514 return arm_address_register_rtx_p (ind, 0);
4516 /* Match:
4517 (mem (plus (reg)
4518 (const))). */
4519 if (GET_CODE (ind) == PLUS
4520 && GET_CODE (XEXP (ind, 0)) == REG
4521 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4522 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4523 && INTVAL (XEXP (ind, 1)) > -1024
4524 && INTVAL (XEXP (ind, 1)) < 1024)
4525 return TRUE;
4528 return FALSE;
4532 /* Return TRUE if OP is a REG or constant zero. */
4534 vfp_compare_operand (rtx op, enum machine_mode mode)
4536 if (s_register_operand (op, mode))
4537 return TRUE;
4539 return (GET_CODE (op) == CONST_DOUBLE
4540 && arm_const_double_rtx (op));
4544 /* Return GENERAL_REGS if a scratch register is required to reload x to/from
4545 VFP registers. Otherwise return NO_REGS. */
4547 enum reg_class
4548 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4550 if (vfp_mem_operand (x) || s_register_operand (x, mode))
4551 return NO_REGS;
4553 return GENERAL_REGS;
4557 /* Return TRUE if INSN is an "LDR REG, ADDR" instruction.
4558 Used by the Cirrus Maverick code which has to work around
4559 a hardware bug triggered by such instructions. */
4560 static bool
4561 arm_memory_load_p (rtx insn)
4563 rtx body, lhs, rhs;
4565 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4566 return false;
4568 body = PATTERN (insn);
4570 if (GET_CODE (body) != SET)
4571 return false;
4573 lhs = XEXP (body, 0);
4574 rhs = XEXP (body, 1);
4576 lhs = REG_OR_SUBREG_RTX (lhs);
4578 /* If the destination is not a general purpose
4579 register we do not have to worry. */
4580 if (GET_CODE (lhs) != REG
4581 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4582 return false;
4584 /* As well as loads from memory we also have to react
4585 to loads of invalid constants which will be turned
4586 into loads from the minipool. */
4587 return (GET_CODE (rhs) == MEM
4588 || GET_CODE (rhs) == SYMBOL_REF
4589 || note_invalid_constants (insn, -1, false));
4592 /* Return TRUE if INSN is a Cirrus instruction. */
4593 static bool
4594 arm_cirrus_insn_p (rtx insn)
4596 enum attr_cirrus attr;
4598 /* get_attr aborts on USE and CLOBBER. */
4599 if (!insn
4600 || GET_CODE (insn) != INSN
4601 || GET_CODE (PATTERN (insn)) == USE
4602 || GET_CODE (PATTERN (insn)) == CLOBBER)
4603 return 0;
4605 attr = get_attr_cirrus (insn);
4607 return attr != CIRRUS_NOT;
4610 /* Cirrus reorg for invalid instruction combinations. */
4611 static void
4612 cirrus_reorg (rtx first)
4614 enum attr_cirrus attr;
4615 rtx body = PATTERN (first);
4616 rtx t;
4617 int nops;
4619 /* Any branch must be followed by 2 non Cirrus instructions. */
4620 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4622 nops = 0;
4623 t = next_nonnote_insn (first);
4625 if (arm_cirrus_insn_p (t))
4626 ++ nops;
4628 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4629 ++ nops;
4631 while (nops --)
4632 emit_insn_after (gen_nop (), first);
4634 return;
4637 /* (float (blah)) is in parallel with a clobber. */
4638 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4639 body = XVECEXP (body, 0, 0);
4641 if (GET_CODE (body) == SET)
4643 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4645 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4646 be followed by a non Cirrus insn. */
4647 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4649 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4650 emit_insn_after (gen_nop (), first);
4652 return;
4654 else if (arm_memory_load_p (first))
4656 unsigned int arm_regno;
4658 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4659 ldr/cfmv64hr combination where the Rd field is the same
4660 in both instructions must be split with a non Cirrus
4661 insn. Example:
4663 ldr r0, blah
4665 cfmvsr mvf0, r0. */
4667 /* Get Arm register number for ldr insn. */
4668 if (GET_CODE (lhs) == REG)
4669 arm_regno = REGNO (lhs);
4670 else if (GET_CODE (rhs) == REG)
4671 arm_regno = REGNO (rhs);
4672 else
4673 abort ();
4675 /* Next insn. */
4676 first = next_nonnote_insn (first);
4678 if (! arm_cirrus_insn_p (first))
4679 return;
4681 body = PATTERN (first);
4683 /* (float (blah)) is in parallel with a clobber. */
4684 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4685 body = XVECEXP (body, 0, 0);
4687 if (GET_CODE (body) == FLOAT)
4688 body = XEXP (body, 0);
4690 if (get_attr_cirrus (first) == CIRRUS_MOVE
4691 && GET_CODE (XEXP (body, 1)) == REG
4692 && arm_regno == REGNO (XEXP (body, 1)))
4693 emit_insn_after (gen_nop (), first);
4695 return;
4699 /* get_attr aborts on USE and CLOBBER. */
4700 if (!first
4701 || GET_CODE (first) != INSN
4702 || GET_CODE (PATTERN (first)) == USE
4703 || GET_CODE (PATTERN (first)) == CLOBBER)
4704 return;
4706 attr = get_attr_cirrus (first);
4708 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4709 must be followed by a non-coprocessor instruction. */
4710 if (attr == CIRRUS_COMPARE)
4712 nops = 0;
4714 t = next_nonnote_insn (first);
4716 if (arm_cirrus_insn_p (t))
4717 ++ nops;
4719 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4720 ++ nops;
4722 while (nops --)
4723 emit_insn_after (gen_nop (), first);
4725 return;
4729 /* Return nonzero if OP is a constant power of two. */
4731 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4733 if (GET_CODE (op) == CONST_INT)
4735 HOST_WIDE_INT value = INTVAL (op);
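/* A power of two has exactly one bit set, so clearing its lowest
   set bit must leave zero: e.g. 8 & 7 == 0, but 12 & 11 == 8.  */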
4737 return value != 0 && (value & (value - 1)) == 0;
4740 return FALSE;
4743 /* Return TRUE for a valid operand of a DImode operation.
4744 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4745 Note that this disallows MEM(REG+REG), but allows
4746 MEM(PRE/POST_INC/DEC(REG)). */
4748 di_operand (rtx op, enum machine_mode mode)
4750 if (s_register_operand (op, mode))
4751 return TRUE;
4753 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4754 return FALSE;
4756 if (GET_CODE (op) == SUBREG)
4757 op = SUBREG_REG (op);
4759 switch (GET_CODE (op))
4761 case CONST_DOUBLE:
4762 case CONST_INT:
4763 return TRUE;
4765 case MEM:
4766 return memory_address_p (DImode, XEXP (op, 0));
4768 default:
4769 return FALSE;
4773 /* Like di_operand, but don't accept constants. */
4775 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4777 if (s_register_operand (op, mode))
4778 return TRUE;
4780 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4781 return FALSE;
4783 if (GET_CODE (op) == SUBREG)
4784 op = SUBREG_REG (op);
4786 if (GET_CODE (op) == MEM)
4787 return memory_address_p (DImode, XEXP (op, 0));
4789 return FALSE;
4792 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
4793 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4794 Note that this disallows MEM(REG+REG), but allows
4795 MEM(PRE/POST_INC/DEC(REG)). */
4797 soft_df_operand (rtx op, enum machine_mode mode)
4799 if (s_register_operand (op, mode))
4800 return TRUE;
4802 if (mode != VOIDmode && GET_MODE (op) != mode)
4803 return FALSE;
4805 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4806 return FALSE;
4808 if (GET_CODE (op) == SUBREG)
4809 op = SUBREG_REG (op);
4811 switch (GET_CODE (op))
4813 case CONST_DOUBLE:
4814 return TRUE;
4816 case MEM:
4817 return memory_address_p (DFmode, XEXP (op, 0));
4819 default:
4820 return FALSE;
4824 /* Like soft_df_operand, but don't accept constants. */
4826 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4828 if (s_register_operand (op, mode))
4829 return TRUE;
4831 if (mode != VOIDmode && GET_MODE (op) != mode)
4832 return FALSE;
4834 if (GET_CODE (op) == SUBREG)
4835 op = SUBREG_REG (op);
4837 if (GET_CODE (op) == MEM)
4838 return memory_address_p (DFmode, XEXP (op, 0));
4839 return FALSE;
4842 /* Return TRUE for valid index operands. */
4844 index_operand (rtx op, enum machine_mode mode)
4846 return (s_register_operand (op, mode)
4847 || (immediate_operand (op, mode)
4848 && (GET_CODE (op) != CONST_INT
4849 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4852 /* Return TRUE for valid shifts by a constant. This also accepts any
4853 power of two on the (somewhat overly relaxed) assumption that the
4854 shift operator in this case was a mult. */
4856 const_shift_operand (rtx op, enum machine_mode mode)
4858 return (power_of_two_operand (op, mode)
4859 || (immediate_operand (op, mode)
4860 && (GET_CODE (op) != CONST_INT
4861 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4864 /* Return TRUE for arithmetic operators which can be combined with a multiply
4865 (shift). */
4867 shiftable_operator (rtx x, enum machine_mode mode)
4869 enum rtx_code code;
4871 if (GET_MODE (x) != mode)
4872 return FALSE;
4874 code = GET_CODE (x);
4876 return (code == PLUS || code == MINUS
4877 || code == IOR || code == XOR || code == AND);
4880 /* Return TRUE for binary logical operators. */
4882 logical_binary_operator (rtx x, enum machine_mode mode)
4884 enum rtx_code code;
4886 if (GET_MODE (x) != mode)
4887 return FALSE;
4889 code = GET_CODE (x);
4891 return (code == IOR || code == XOR || code == AND);
4894 /* Return TRUE for shift operators. */
4896 shift_operator (rtx x,enum machine_mode mode)
4898 enum rtx_code code;
4900 if (GET_MODE (x) != mode)
4901 return FALSE;
4903 code = GET_CODE (x);
4905 if (code == MULT)
4906 return power_of_two_operand (XEXP (x, 1), mode);
4908 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4909 || code == ROTATERT);
4912 /* Return TRUE if x is EQ or NE. */
4914 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4916 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4919 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4921 arm_comparison_operator (rtx x, enum machine_mode mode)
4923 return (comparison_operator (x, mode)
4924 && GET_CODE (x) != LTGT
4925 && GET_CODE (x) != UNEQ);
4928 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4930 minmax_operator (rtx x, enum machine_mode mode)
4932 enum rtx_code code = GET_CODE (x);
4934 if (GET_MODE (x) != mode)
4935 return FALSE;
4937 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4940 /* Return TRUE if this is the condition code register; if we aren't given
4941 a mode, accept any class CCmode register. */
4943 cc_register (rtx x, enum machine_mode mode)
4945 if (mode == VOIDmode)
4947 mode = GET_MODE (x);
4949 if (GET_MODE_CLASS (mode) != MODE_CC)
4950 return FALSE;
4953 if ( GET_MODE (x) == mode
4954 && GET_CODE (x) == REG
4955 && REGNO (x) == CC_REGNUM)
4956 return TRUE;
4958 return FALSE;
4961 /* Return TRUE if this is the condition code register; if we aren't given
4962 a mode, accept any class CCmode register which indicates a dominance
4963 expression. */
4965 dominant_cc_register (rtx x, enum machine_mode mode)
4967 if (mode == VOIDmode)
4969 mode = GET_MODE (x);
4971 if (GET_MODE_CLASS (mode) != MODE_CC)
4972 return FALSE;
4975 if (mode != CC_DNEmode && mode != CC_DEQmode
4976 && mode != CC_DLEmode && mode != CC_DLTmode
4977 && mode != CC_DGEmode && mode != CC_DGTmode
4978 && mode != CC_DLEUmode && mode != CC_DLTUmode
4979 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4980 return FALSE;
4982 return cc_register (x, mode);
4985 /* Return TRUE if X references a SYMBOL_REF. */
4987 symbol_mentioned_p (rtx x)
4989 const char * fmt;
4990 int i;
4992 if (GET_CODE (x) == SYMBOL_REF)
4993 return 1;
4995 fmt = GET_RTX_FORMAT (GET_CODE (x));
4997 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4999 if (fmt[i] == 'E')
5001 int j;
5003 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5004 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5005 return 1;
5007 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
5008 return 1;
5011 return 0;
5014 /* Return TRUE if X references a LABEL_REF. */
5016 label_mentioned_p (rtx x)
5018 const char * fmt;
5019 int i;
5021 if (GET_CODE (x) == LABEL_REF)
5022 return 1;
5024 fmt = GET_RTX_FORMAT (GET_CODE (x));
5025 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5027 if (fmt[i] == 'E')
5029 int j;
5031 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5032 if (label_mentioned_p (XVECEXP (x, i, j)))
5033 return 1;
5035 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5036 return 1;
5039 return 0;
5042 enum rtx_code
5043 minmax_code (rtx x)
5045 enum rtx_code code = GET_CODE (x);
5047 if (code == SMAX)
5048 return GE;
5049 else if (code == SMIN)
5050 return LE;
5051 else if (code == UMIN)
5052 return LEU;
5053 else if (code == UMAX)
5054 return GEU;
5056 abort ();
5059 /* Return 1 if memory locations are adjacent. */
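/* For instance (illustrative), (mem (plus r3 (const_int 8))) and
   (mem (plus r3 (const_int 12))) are adjacent; different base
   registers, or a gap other than four bytes, are not.  */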
5061 adjacent_mem_locations (rtx a, rtx b)
5063 if ((GET_CODE (XEXP (a, 0)) == REG
5064 || (GET_CODE (XEXP (a, 0)) == PLUS
5065 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5066 && (GET_CODE (XEXP (b, 0)) == REG
5067 || (GET_CODE (XEXP (b, 0)) == PLUS
5068 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5070 int val0 = 0, val1 = 0;
5071 int reg0, reg1;
5073 if (GET_CODE (XEXP (a, 0)) == PLUS)
5075 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5076 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5078 else
5079 reg0 = REGNO (XEXP (a, 0));
5081 if (GET_CODE (XEXP (b, 0)) == PLUS)
5083 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5084 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5086 else
5087 reg1 = REGNO (XEXP (b, 0));
5089 /* Don't accept any offset that will require multiple
5090 instructions to handle, since this would cause the
5091 arith_adjacentmem pattern to output an overlong sequence. */
5092 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5093 return 0;
5095 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5097 return 0;
5100 /* Return 1 if OP is a load multiple operation. It is known to be
5101 parallel and the first section will be tested. */
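/* The expected shape is roughly (a sketch, not from the original
   comment):
       (parallel [(set (reg r4) (mem (reg rb)))
                  (set (reg r5) (mem (plus (reg rb) (const_int 4))))
                  ...])
   optionally with a leading write-back SET of the base register.  */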
5103 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5105 HOST_WIDE_INT count = XVECLEN (op, 0);
5106 int dest_regno;
5107 rtx src_addr;
5108 HOST_WIDE_INT i = 1, base = 0;
5109 rtx elt;
5111 if (count <= 1
5112 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5113 return 0;
5115 /* Check to see if this might be a write-back. */
5116 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5118 i++;
5119 base = 1;
5121 /* Now check it more carefully. */
5122 if (GET_CODE (SET_DEST (elt)) != REG
5123 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5124 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5125 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5126 return 0;
5129 /* Perform a quick check so we don't blow up below. */
5130 if (count <= i
5131 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5132 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5133 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5134 return 0;
5136 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5137 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5139 for (; i < count; i++)
5141 elt = XVECEXP (op, 0, i);
5143 if (GET_CODE (elt) != SET
5144 || GET_CODE (SET_DEST (elt)) != REG
5145 || GET_MODE (SET_DEST (elt)) != SImode
5146 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5147 || GET_CODE (SET_SRC (elt)) != MEM
5148 || GET_MODE (SET_SRC (elt)) != SImode
5149 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5150 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5151 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5152 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5153 return 0;
5156 return 1;
5159 /* Return 1 if OP is a store multiple operation. It is known to be
5160 parallel and the first section will be tested. */
5162 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5164 HOST_WIDE_INT count = XVECLEN (op, 0);
5165 int src_regno;
5166 rtx dest_addr;
5167 HOST_WIDE_INT i = 1, base = 0;
5168 rtx elt;
5170 if (count <= 1
5171 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5172 return 0;
5174 /* Check to see if this might be a write-back. */
5175 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5177 i++;
5178 base = 1;
5180 /* Now check it more carefully. */
5181 if (GET_CODE (SET_DEST (elt)) != REG
5182 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5183 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5184 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5185 return 0;
5188 /* Perform a quick check so we don't blow up below. */
5189 if (count <= i
5190 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5191 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5192 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5193 return 0;
5195 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5196 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5198 for (; i < count; i++)
5200 elt = XVECEXP (op, 0, i);
5202 if (GET_CODE (elt) != SET
5203 || GET_CODE (SET_SRC (elt)) != REG
5204 || GET_MODE (SET_SRC (elt)) != SImode
5205 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5206 || GET_CODE (SET_DEST (elt)) != MEM
5207 || GET_MODE (SET_DEST (elt)) != SImode
5208 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5209 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5210 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5211 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5212 return 0;
5215 return 1;
5219 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5220 HOST_WIDE_INT *load_offset)
5222 int unsorted_regs[4];
5223 HOST_WIDE_INT unsorted_offsets[4];
5224 int order[4];
5225 int base_reg = -1;
5226 int i;
5228 /* Can only handle 2, 3, or 4 insns at present,
5229 though could be easily extended if required. */
5230 if (nops < 2 || nops > 4)
5231 abort ();
5233 /* Loop over the operands and check that the memory references are
5234 suitable (i.e. immediate offsets from the same base register). At
5235 the same time, extract the target register, and the memory
5236 offsets. */
5237 for (i = 0; i < nops; i++)
5239 rtx reg;
5240 rtx offset;
5242 /* Convert a subreg of a mem into the mem itself. */
5243 if (GET_CODE (operands[nops + i]) == SUBREG)
5244 operands[nops + i] = alter_subreg (operands + (nops + i));
5246 if (GET_CODE (operands[nops + i]) != MEM)
5247 abort ();
5249 /* Don't reorder volatile memory references; it doesn't seem worth
5250 looking for the case where the order is ok anyway. */
5251 if (MEM_VOLATILE_P (operands[nops + i]))
5252 return 0;
5254 offset = const0_rtx;
5256 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5257 || (GET_CODE (reg) == SUBREG
5258 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5259 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5260 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5261 == REG)
5262 || (GET_CODE (reg) == SUBREG
5263 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5264 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5265 == CONST_INT)))
5267 if (i == 0)
5269 base_reg = REGNO (reg);
5270 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5271 ? REGNO (operands[i])
5272 : REGNO (SUBREG_REG (operands[i])));
5273 order[0] = 0;
5275 else
5277 if (base_reg != (int) REGNO (reg))
5278 /* Not addressed from the same base register. */
5279 return 0;
5281 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5282 ? REGNO (operands[i])
5283 : REGNO (SUBREG_REG (operands[i])));
5284 if (unsorted_regs[i] < unsorted_regs[order[0]])
5285 order[0] = i;
5288 /* If it isn't an integer register, or if it overwrites the
5289 base register but isn't the last insn in the list, then
5290 we can't do this. */
5291 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5292 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5293 return 0;
5295 unsorted_offsets[i] = INTVAL (offset);
5297 else
5298 /* Not a suitable memory address. */
5299 return 0;
5302 /* All the useful information has now been extracted from the
5303 operands into unsorted_regs and unsorted_offsets; additionally,
5304 order[0] has been set to the lowest numbered register in the
5305 list. Sort the registers into order, and check that the memory
5306 offsets are ascending and adjacent. */
5308 for (i = 1; i < nops; i++)
5310 int j;
5312 order[i] = order[i - 1];
5313 for (j = 0; j < nops; j++)
5314 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5315 && (order[i] == order[i - 1]
5316 || unsorted_regs[j] < unsorted_regs[order[i]]))
5317 order[i] = j;
5319 /* Have we found a suitable register? If not, one must be used more
5320 than once. */
5321 if (order[i] == order[i - 1])
5322 return 0;
5324 /* Is the memory address adjacent and ascending? */
5325 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5326 return 0;
5329 if (base)
5331 *base = base_reg;
5333 for (i = 0; i < nops; i++)
5334 regs[i] = unsorted_regs[order[i]];
5336 *load_offset = unsorted_offsets[order[0]];
5339 if (unsorted_offsets[order[0]] == 0)
5340 return 1; /* ldmia */
5342 if (unsorted_offsets[order[0]] == 4)
5343 return 2; /* ldmib */
5345 if (unsorted_offsets[order[nops - 1]] == 0)
5346 return 3; /* ldmda */
5348 if (unsorted_offsets[order[nops - 1]] == -4)
5349 return 4; /* ldmdb */
5351 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5352 if the offset isn't small enough. The reason 2 ldrs are faster
5353 is because these ARMs are able to do more than one cache access
5354 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5355 whilst the ARM8 has a double bandwidth cache. This means that
5356 these cores can do both an instruction fetch and a data fetch in
5357 a single cycle, so the trick of calculating the address into a
5358 scratch register (one of the result regs) and then doing a load
5359 multiple actually becomes slower (and no smaller in code size).
5360 That is the transformation
5362 ldr rd1, [rbase + offset]
5363 ldr rd2, [rbase + offset + 4]
5365 to
5367 add rd1, rbase, offset
5368 ldmia rd1, {rd1, rd2}
5370 produces worse code -- '3 cycles + any stalls on rd2' instead of
5371 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5372 access per cycle, the first sequence could never complete in less
5373 than 6 cycles, whereas the ldm sequence would only take 5 and
5374 would make better use of sequential accesses if not hitting the
5375 cache.
5377 We cheat here and test 'arm_ld_sched' which we currently know to
5378 only be true for the ARM8, ARM9 and StrongARM. If this ever
5379 changes, then the test below needs to be reworked. */
5380 if (nops == 2 && arm_ld_sched)
5381 return 0;
5383 /* Can't do it without setting up the offset; only do this if it takes
5384 no more than one insn. */
5385 return (const_ok_for_arm (unsorted_offsets[order[0]])
5386 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
5389 const char *
5390 emit_ldm_seq (rtx *operands, int nops)
5392 int regs[4];
5393 int base_reg;
5394 HOST_WIDE_INT offset;
5395 char buf[100];
5396 int i;
5398 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5400 case 1:
5401 strcpy (buf, "ldm%?ia\t");
5402 break;
5404 case 2:
5405 strcpy (buf, "ldm%?ib\t");
5406 break;
5408 case 3:
5409 strcpy (buf, "ldm%?da\t");
5410 break;
5412 case 4:
5413 strcpy (buf, "ldm%?db\t");
5414 break;
5416 case 5:
5417 if (offset >= 0)
5418 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5419 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5420 (long) offset);
5421 else
5422 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5423 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5424 (long) -offset);
5425 output_asm_insn (buf, operands);
5426 base_reg = regs[0];
5427 strcpy (buf, "ldm%?ia\t");
5428 break;
5430 default:
5431 abort ();
5434 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5435 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5437 for (i = 1; i < nops; i++)
5438 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5439 reg_names[regs[i]]);
5441 strcat (buf, "}\t%@ phole ldm");
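/* Assuming an empty REGISTER_PREFIX and no condition, regs = {r0, r1}
   ascending from [r3] would emit "ldmia r3, {r0, r1} @ phole ldm"
   (illustrative).  */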
5443 output_asm_insn (buf, operands);
5444 return "";
5448 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5449 HOST_WIDE_INT * load_offset)
5451 int unsorted_regs[4];
5452 HOST_WIDE_INT unsorted_offsets[4];
5453 int order[4];
5454 int base_reg = -1;
5455 int i;
5457 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5458 extended if required. */
5459 if (nops < 2 || nops > 4)
5460 abort ();
5462 /* Loop over the operands and check that the memory references are
5463 suitable (i.e. immediate offsets from the same base register). At
5464 the same time, extract the target register, and the memory
5465 offsets. */
5466 for (i = 0; i < nops; i++)
5468 rtx reg;
5469 rtx offset;
5471 /* Convert a subreg of a mem into the mem itself. */
5472 if (GET_CODE (operands[nops + i]) == SUBREG)
5473 operands[nops + i] = alter_subreg (operands + (nops + i));
5475 if (GET_CODE (operands[nops + i]) != MEM)
5476 abort ();
5478 /* Don't reorder volatile memory references; it doesn't seem worth
5479 looking for the case where the order is ok anyway. */
5480 if (MEM_VOLATILE_P (operands[nops + i]))
5481 return 0;
5483 offset = const0_rtx;
5485 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5486 || (GET_CODE (reg) == SUBREG
5487 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5488 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5489 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5490 == REG)
5491 || (GET_CODE (reg) == SUBREG
5492 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5493 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5494 == CONST_INT)))
5496 if (i == 0)
5498 base_reg = REGNO (reg);
5499 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5500 ? REGNO (operands[i])
5501 : REGNO (SUBREG_REG (operands[i])));
5502 order[0] = 0;
5504 else
5506 if (base_reg != (int) REGNO (reg))
5507 /* Not addressed from the same base register. */
5508 return 0;
5510 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5511 ? REGNO (operands[i])
5512 : REGNO (SUBREG_REG (operands[i])));
5513 if (unsorted_regs[i] < unsorted_regs[order[0]])
5514 order[0] = i;
5517 /* If it isn't an integer register, then we can't do this. */
5518 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5519 return 0;
5521 unsorted_offsets[i] = INTVAL (offset);
5523 else
5524 /* Not a suitable memory address. */
5525 return 0;
5528 /* All the useful information has now been extracted from the
5529 operands into unsorted_regs and unsorted_offsets; additionally,
5530 order[0] has been set to the lowest numbered register in the
5531 list. Sort the registers into order, and check that the memory
5532 offsets are ascending and adjacent. */
5534 for (i = 1; i < nops; i++)
5536 int j;
5538 order[i] = order[i - 1];
5539 for (j = 0; j < nops; j++)
5540 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5541 && (order[i] == order[i - 1]
5542 || unsorted_regs[j] < unsorted_regs[order[i]]))
5543 order[i] = j;
5545 /* Have we found a suitable register? If not, one must be used more
5546 than once. */
5547 if (order[i] == order[i - 1])
5548 return 0;
5550 /* Is the memory address adjacent and ascending? */
5551 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5552 return 0;
5555 if (base)
5557 *base = base_reg;
5559 for (i = 0; i < nops; i++)
5560 regs[i] = unsorted_regs[order[i]];
5562 *load_offset = unsorted_offsets[order[0]];
5565 if (unsorted_offsets[order[0]] == 0)
5566 return 1; /* stmia */
5568 if (unsorted_offsets[order[0]] == 4)
5569 return 2; /* stmib */
5571 if (unsorted_offsets[order[nops - 1]] == 0)
5572 return 3; /* stmda */
5574 if (unsorted_offsets[order[nops - 1]] == -4)
5575 return 4; /* stmdb */
5577 return 0;
5580 const char *
5581 emit_stm_seq (rtx *operands, int nops)
5583 int regs[4];
5584 int base_reg;
5585 HOST_WIDE_INT offset;
5586 char buf[100];
5587 int i;
5589 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5591 case 1:
5592 strcpy (buf, "stm%?ia\t");
5593 break;
5595 case 2:
5596 strcpy (buf, "stm%?ib\t");
5597 break;
5599 case 3:
5600 strcpy (buf, "stm%?da\t");
5601 break;
5603 case 4:
5604 strcpy (buf, "stm%?db\t");
5605 break;
5607 default:
5608 abort ();
5611 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5612 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5614 for (i = 1; i < nops; i++)
5615 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5616 reg_names[regs[i]]);
5618 strcat (buf, "}\t%@ phole stm");
5620 output_asm_insn (buf, operands);
5621 return "";
5625 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5627 if (GET_CODE (op) != PARALLEL
5628 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5629 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5630 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5631 return 0;
5633 return 1;
5636 /* Routines for use in generating RTL. */
5639 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5640 int write_back, int unchanging_p, int in_struct_p,
5641 int scalar_p)
5643 int i = 0, j;
5644 rtx result;
5645 int sign = up ? 1 : -1;
5646 rtx mem;
5648 /* XScale has load-store double instructions, but they have stricter
5649 alignment requirements than load-store multiple, so we cannot
5650 use them.
5652 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5653 the pipeline until completion.
5655 NREGS CYCLES
5656 1 3
5657 2 4
5658 3 5
5659 4 6
5661 An ldr instruction takes 1-3 cycles, but does not block the
5662 pipeline.
5664 NREGS CYCLES
5665 1 1-3
5666 2 2-6
5667 3 3-9
5668 4 4-12
5670 Best case ldr will always win. However, the more ldr instructions
5671 we issue, the less likely we are to be able to schedule them well.
5672 Using ldr instructions also increases code size.
5674 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5675 for counts of 3 or 4 regs. */
5676 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5678 rtx seq;
5680 start_sequence ();
5682 for (i = 0; i < count; i++)
5684 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5685 RTX_UNCHANGING_P (mem) = unchanging_p;
5686 MEM_IN_STRUCT_P (mem) = in_struct_p;
5687 MEM_SCALAR_P (mem) = scalar_p;
5688 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5691 if (write_back)
5692 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5694 seq = get_insns ();
5695 end_sequence ();
5697 return seq;
5700 result = gen_rtx_PARALLEL (VOIDmode,
5701 rtvec_alloc (count + (write_back ? 1 : 0)));
5702 if (write_back)
5704 XVECEXP (result, 0, 0)
5705 = gen_rtx_SET (GET_MODE (from), from,
5706 plus_constant (from, count * 4 * sign));
5707 i = 1;
5708 count++;
5711 for (j = 0; i < count; i++, j++)
5713 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5714 RTX_UNCHANGING_P (mem) = unchanging_p;
5715 MEM_IN_STRUCT_P (mem) = in_struct_p;
5716 MEM_SCALAR_P (mem) = scalar_p;
5717 XVECEXP (result, 0, i)
5718 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5721 return result;
5725 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5726 int write_back, int unchanging_p, int in_struct_p,
5727 int scalar_p)
5729 int i = 0, j;
5730 rtx result;
5731 int sign = up ? 1 : -1;
5732 rtx mem;
5734 /* See arm_gen_load_multiple for discussion of
5735 the pros/cons of ldm/stm usage for XScale. */
5736 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5738 rtx seq;
5740 start_sequence ();
5742 for (i = 0; i < count; i++)
5744 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5745 RTX_UNCHANGING_P (mem) = unchanging_p;
5746 MEM_IN_STRUCT_P (mem) = in_struct_p;
5747 MEM_SCALAR_P (mem) = scalar_p;
5748 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5751 if (write_back)
5752 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5754 seq = get_insns ();
5755 end_sequence ();
5757 return seq;
5760 result = gen_rtx_PARALLEL (VOIDmode,
5761 rtvec_alloc (count + (write_back ? 1 : 0)));
5762 if (write_back)
5764 XVECEXP (result, 0, 0)
5765 = gen_rtx_SET (GET_MODE (to), to,
5766 plus_constant (to, count * 4 * sign));
5767 i = 1;
5768 count++;
5771 for (j = 0; i < count; i++, j++)
5773 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5774 RTX_UNCHANGING_P (mem) = unchanging_p;
5775 MEM_IN_STRUCT_P (mem) = in_struct_p;
5776 MEM_SCALAR_P (mem) = scalar_p;
5778 XVECEXP (result, 0, i)
5779 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5782 return result;
5786 arm_gen_movstrqi (rtx *operands)
5788 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5789 int i;
5790 rtx src, dst;
5791 rtx st_src, st_dst, fin_src, fin_dst;
5792 rtx part_bytes_reg = NULL;
5793 rtx mem;
5794 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5795 int dst_scalar_p, src_scalar_p;
5797 if (GET_CODE (operands[2]) != CONST_INT
5798 || GET_CODE (operands[3]) != CONST_INT
5799 || INTVAL (operands[2]) > 64
5800 || INTVAL (operands[3]) & 3)
5801 return 0;
5803 st_dst = XEXP (operands[0], 0);
5804 st_src = XEXP (operands[1], 0);
5806 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5807 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5808 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5809 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5810 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5811 src_scalar_p = MEM_SCALAR_P (operands[1]);
5813 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5814 fin_src = src = copy_to_mode_reg (SImode, st_src);
5816 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5817 out_words_to_go = INTVAL (operands[2]) / 4;
5818 last_bytes = INTVAL (operands[2]) & 3;
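/* Illustrative decomposition: a 14-byte copy gives in_words_to_go = 4,
   out_words_to_go = 3 and last_bytes = 2; three whole words are
   stored by the loop below and the trailing halfword by the tail
   code at the end.  */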
5820 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5821 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5823 for (i = 0; in_words_to_go >= 2; i+=4)
5825 if (in_words_to_go > 4)
5826 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5827 src_unchanging_p,
5828 src_in_struct_p,
5829 src_scalar_p));
5830 else
5831 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5832 FALSE, src_unchanging_p,
5833 src_in_struct_p, src_scalar_p));
5835 if (out_words_to_go)
5837 if (out_words_to_go > 4)
5838 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5839 dst_unchanging_p,
5840 dst_in_struct_p,
5841 dst_scalar_p));
5842 else if (out_words_to_go != 1)
5843 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5844 dst, TRUE,
5845 (last_bytes == 0
5846 ? FALSE : TRUE),
5847 dst_unchanging_p,
5848 dst_in_struct_p,
5849 dst_scalar_p));
5850 else
5852 mem = gen_rtx_MEM (SImode, dst);
5853 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5854 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5855 MEM_SCALAR_P (mem) = dst_scalar_p;
5856 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5857 if (last_bytes != 0)
5858 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5862 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5863 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5866 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5867 if (out_words_to_go)
5869 rtx sreg;
5871 mem = gen_rtx_MEM (SImode, src);
5872 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5873 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5874 MEM_SCALAR_P (mem) = src_scalar_p;
5875 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5876 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5878 mem = gen_rtx_MEM (SImode, dst);
5879 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5880 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5881 MEM_SCALAR_P (mem) = dst_scalar_p;
5882 emit_move_insn (mem, sreg);
5883 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5884 in_words_to_go--;
5886 if (in_words_to_go) /* Sanity check */
5887 abort ();
5890 if (in_words_to_go)
5892 if (in_words_to_go < 0)
5893 abort ();
5895 mem = gen_rtx_MEM (SImode, src);
5896 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5897 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5898 MEM_SCALAR_P (mem) = src_scalar_p;
5899 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5902 if (last_bytes && part_bytes_reg == NULL)
5903 abort ();
5905 if (BYTES_BIG_ENDIAN && last_bytes)
5907 rtx tmp = gen_reg_rtx (SImode);
5909 /* The bytes we want are in the top end of the word. */
5910 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5911 GEN_INT (8 * (4 - last_bytes))));
5912 part_bytes_reg = tmp;
5914 while (last_bytes)
5916 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5917 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5918 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5919 MEM_SCALAR_P (mem) = dst_scalar_p;
5920 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5922 if (--last_bytes)
5924 rtx tmp = gen_reg_rtx (SImode);
5925 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5926 part_bytes_reg = tmp;
5931 else
5933 if (last_bytes > 1)
5935 mem = gen_rtx_MEM (HImode, dst);
5936 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5937 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5938 MEM_SCALAR_P (mem) = dst_scalar_p;
5939 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5940 last_bytes -= 2;
5941 if (last_bytes)
5943 rtx tmp = gen_reg_rtx (SImode);
5945 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5946 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5947 part_bytes_reg = tmp;
5951 if (last_bytes)
5953 mem = gen_rtx_MEM (QImode, dst);
5954 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5955 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5956 MEM_SCALAR_P (mem) = dst_scalar_p;
5957 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5961 return 1;
5964 /* Generate a memory reference for a half word, such that it will be loaded
5965 into the top 16 bits of the word. We can assume that the address is
5966 known to be alignable and of the form reg, or plus (reg, const). */
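/* For example (illustrative): on a little-endian target a halfword
   at offset 2 already occupies the top 16 bits of the enclosing
   word, so the word is loaded unchanged; a halfword at offset 0 is
   loaded and then rotated by 16.  */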
5969 arm_gen_rotated_half_load (rtx memref)
5971 HOST_WIDE_INT offset = 0;
5972 rtx base = XEXP (memref, 0);
5974 if (GET_CODE (base) == PLUS)
5976 offset = INTVAL (XEXP (base, 1));
5977 base = XEXP (base, 0);
5980 /* If we aren't allowed to generate unaligned addresses, then fail. */
5981 if (TARGET_MMU_TRAPS
5982 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5983 return NULL;
5985 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5987 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5988 return base;
5990 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
5993 /* Select a dominance comparison mode if possible for a test of the general
5994 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5995 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5996 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5997 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5998 In all cases OP will be either EQ or NE, but we don't need to know which
5999 here. If we are unable to support a dominance comparison we return
6000 CC mode. This will then fail to match for the RTL expressions that
6001 generate this call. */
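/* For example (illustrative): for (ior (lt r0 r1) (le r2 r3)),
   LT dominates LE, so CC_DLEmode is returned; for
   (ior (lt r0 r1) (gt r2 r3)) neither condition dominates the
   other and CCmode is returned.  */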
6002 enum machine_mode
6003 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6005 enum rtx_code cond1, cond2;
6006 int swapped = 0;
6008 /* Currently we will probably get the wrong result if the individual
6009 comparisons are not simple. This also ensures that it is safe to
6010 reverse a comparison if necessary. */
6011 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6012 != CCmode)
6013 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6014 != CCmode))
6015 return CCmode;
6017 /* The if_then_else variant of this tests the second condition if the
6018 first passes, but is true if the first fails. Reverse the first
6019 condition to get a true "inclusive-or" expression. */
6020 if (cond_or == DOM_CC_NX_OR_Y)
6021 cond1 = reverse_condition (cond1);
6023 /* If the comparisons are not equal, and one doesn't dominate the other,
6024 then we can't do this. */
6025 if (cond1 != cond2
6026 && !comparison_dominates_p (cond1, cond2)
6027 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6028 return CCmode;
6030 if (swapped)
6032 enum rtx_code temp = cond1;
6033 cond1 = cond2;
6034 cond2 = temp;
6037 switch (cond1)
6039 case EQ:
6040 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6041 return CC_DEQmode;
6043 switch (cond2)
6045 case LE: return CC_DLEmode;
6046 case LEU: return CC_DLEUmode;
6047 case GE: return CC_DGEmode;
6048 case GEU: return CC_DGEUmode;
6049 default: break;
6052 break;
6054 case LT:
6055 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6056 return CC_DLTmode;
6057 if (cond2 == LE)
6058 return CC_DLEmode;
6059 if (cond2 == NE)
6060 return CC_DNEmode;
6061 break;
6063 case GT:
6064 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6065 return CC_DGTmode;
6066 if (cond2 == GE)
6067 return CC_DGEmode;
6068 if (cond2 == NE)
6069 return CC_DNEmode;
6070 break;
6072 case LTU:
6073 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6074 return CC_DLTUmode;
6075 if (cond2 == LEU)
6076 return CC_DLEUmode;
6077 if (cond2 == NE)
6078 return CC_DNEmode;
6079 break;
6081 case GTU:
6082 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6083 return CC_DGTUmode;
6084 if (cond2 == GEU)
6085 return CC_DGEUmode;
6086 if (cond2 == NE)
6087 return CC_DNEmode;
6088 break;
6090 /* The remaining cases only occur when both comparisons are the
6091 same. */
6092 case NE:
6093 return CC_DNEmode;
6095 case LE:
6096 return CC_DLEmode;
6098 case GE:
6099 return CC_DGEmode;
6101 case LEU:
6102 return CC_DLEUmode;
6104 case GEU:
6105 return CC_DGEUmode;
6107 default:
6108 break;
6111 abort ();
6114 enum machine_mode
6115 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6117 /* All floating point compares return CCFP if it is an equality
6118 comparison, and CCFPE otherwise. */
6119 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6121 switch (op)
6123 case EQ:
6124 case NE:
6125 case UNORDERED:
6126 case ORDERED:
6127 case UNLT:
6128 case UNLE:
6129 case UNGT:
6130 case UNGE:
6131 case UNEQ:
6132 case LTGT:
6133 return CCFPmode;
6135 case LT:
6136 case LE:
6137 case GT:
6138 case GE:
6139 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6140 return CCFPmode;
6141 return CCFPEmode;
6143 default:
6144 abort ();
6148 /* A compare with a shifted operand. Because of canonicalization, the
6149 comparison will have to be swapped when we emit the assembler. */
6150 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6151 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6152 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6153 || GET_CODE (x) == ROTATERT))
6154 return CC_SWPmode;
6156 /* This is a special case that is used by combine to allow a
6157 comparison of a shifted byte load to be split into a zero-extend
6158 followed by a comparison of the shifted integer (only valid for
6159 equalities and unsigned inequalities). */
6160 if (GET_MODE (x) == SImode
6161 && GET_CODE (x) == ASHIFT
6162 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6163 && GET_CODE (XEXP (x, 0)) == SUBREG
6164 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6165 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6166 && (op == EQ || op == NE
6167 || op == GEU || op == GTU || op == LTU || op == LEU)
6168 && GET_CODE (y) == CONST_INT)
6169 return CC_Zmode;
6171 /* A construct for a conditional compare, if the false arm contains
6172 0, then both conditions must be true, otherwise either condition
6173 must be true. Not all conditions are possible, so CCmode is
6174 returned if it can't be done. */
6175 if (GET_CODE (x) == IF_THEN_ELSE
6176 && (XEXP (x, 2) == const0_rtx
6177 || XEXP (x, 2) == const1_rtx)
6178 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
6179 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
6180 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6181 INTVAL (XEXP (x, 2)));
6183 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6184 if (GET_CODE (x) == AND
6185 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
6186 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
6187 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6188 DOM_CC_X_AND_Y);
6190 if (GET_CODE (x) == IOR
6191 && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
6192 && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
6193 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6194 DOM_CC_X_OR_Y);
6196 /* An operation (on Thumb) where we want to test for a single bit.
6197 This is done by shifting that bit up into the top bit of a
6198 scratch register; we can then branch on the sign bit. */
6199 if (TARGET_THUMB
6200 && GET_MODE (x) == SImode
6201 && (op == EQ || op == NE)
6202 && (GET_CODE (x) == ZERO_EXTRACT))
6203 return CC_Nmode;
6205 /* For an operation that sets the condition codes as a side-effect, the
6206 V flag is not set correctly, so we can only use comparisons where
6207 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6208 instead.) */
6209 if (GET_MODE (x) == SImode
6210 && y == const0_rtx
6211 && (op == EQ || op == NE || op == LT || op == GE)
6212 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6213 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6214 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6215 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6216 || GET_CODE (x) == LSHIFTRT
6217 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6218 || GET_CODE (x) == ROTATERT
6219 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6220 return CC_NOOVmode;
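/* As an illustration (the assembly shown is hypothetical): a test of
   (a + b) < 0 can reuse the flags set by the addition itself and
   branch on the N flag directly:

       adds  r0, r1, r2    @ N and Z are set from the result
       bmi   .Lnegative    @ "lt" against zero becomes "mi"  */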
6222 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6223 return CC_Zmode;
6225 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6226 && GET_CODE (x) == PLUS
6227 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6228 return CC_Cmode;
6230 return CCmode;
6233 /* X and Y are two things to compare using CODE. Emit the compare insn and
6234 return the rtx for register 0 in the proper mode. FP means this is a
6235 floating point compare: I don't think that it is needed on the arm. */
6237 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6239 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6240 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6242 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6243 gen_rtx_COMPARE (mode, x, y)));
6245 return cc_reg;
6248 /* Generate a sequence of insns that will generate the correct return
6249 address mask depending on the physical architecture that the program
6250 is running on. */
6252 arm_gen_return_addr_mask (void)
6254 rtx reg = gen_reg_rtx (Pmode);
6256 emit_insn (gen_return_addr_mask (reg));
6257 return reg;
6260 void
6261 arm_reload_in_hi (rtx *operands)
6263 rtx ref = operands[1];
6264 rtx base, scratch;
6265 HOST_WIDE_INT offset = 0;
6267 if (GET_CODE (ref) == SUBREG)
6269 offset = SUBREG_BYTE (ref);
6270 ref = SUBREG_REG (ref);
6273 if (GET_CODE (ref) == REG)
6275 /* We have a pseudo which has been spilt onto the stack; there
6276 are two cases here: the first where there is a simple
6277 stack-slot replacement and a second where the stack-slot is
6278 out of range, or is used as a subreg. */
6279 if (reg_equiv_mem[REGNO (ref)])
6281 ref = reg_equiv_mem[REGNO (ref)];
6282 base = find_replacement (&XEXP (ref, 0));
6284 else
6285 /* The slot is out of range, or was dressed up in a SUBREG. */
6286 base = reg_equiv_address[REGNO (ref)];
6288 else
6289 base = find_replacement (&XEXP (ref, 0));
6291 /* Handle the case where the address is too complex to be offset by 1. */
6292 if (GET_CODE (base) == MINUS
6293 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6295 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6297 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6298 base = base_plus;
6300 else if (GET_CODE (base) == PLUS)
6302 /* The addend must be CONST_INT, or we would have dealt with it above. */
6303 HOST_WIDE_INT hi, lo;
6305 offset += INTVAL (XEXP (base, 1));
6306 base = XEXP (base, 0);
6308 /* Rework the address into a legal sequence of insns. */
6309 /* Valid range for lo is -4095 -> 4095 */
6310 lo = (offset >= 0
6311 ? (offset & 0xfff)
6312 : -((-offset) & 0xfff));
6314 /* Corner case, if lo is the max offset then we would be out of range
6315 once we have added the additional 1 below, so bump the msb into the
6316 pre-loading insn(s). */
6317 if (lo == 4095)
6318 lo &= 0x7ff;
6320 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6321 ^ (HOST_WIDE_INT) 0x80000000)
6322 - (HOST_WIDE_INT) 0x80000000);
6324 if (hi + lo != offset)
6325 abort ();
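/* A worked example of the split (values illustrative): for
   offset = 4095, lo starts at 4095 and the corner case above reduces
   it to 2047, giving hi = 2048; for offset = -4100, lo = -4 and
   hi = -4096.  In both cases hi + lo == offset, and both lo and
   lo + 1 stay within the +/-4095 addressing range.  */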
6327 if (hi != 0)
6329 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6331 /* Get the base address; addsi3 knows how to handle constants
6332 that require more than one insn. */
6333 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6334 base = base_plus;
6335 offset = lo;
6339 /* Operands[2] may overlap operands[0] (though it won't overlap
6340 operands[1]), that's why we asked for a DImode reg -- so we can
6341 use the bit that does not overlap. */
6342 if (REGNO (operands[2]) == REGNO (operands[0]))
6343 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6344 else
6345 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6347 emit_insn (gen_zero_extendqisi2 (scratch,
6348 gen_rtx_MEM (QImode,
6349 plus_constant (base,
6350 offset))));
6351 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6352 gen_rtx_MEM (QImode,
6353 plus_constant (base,
6354 offset + 1))));
6355 if (!BYTES_BIG_ENDIAN)
6356 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6357 gen_rtx_IOR (SImode,
6358 gen_rtx_ASHIFT
6359 (SImode,
6360 gen_rtx_SUBREG (SImode, operands[0], 0),
6361 GEN_INT (8)),
6362 scratch)));
6363 else
6364 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6365 gen_rtx_IOR (SImode,
6366 gen_rtx_ASHIFT (SImode, scratch,
6367 GEN_INT (8)),
6368 gen_rtx_SUBREG (SImode, operands[0],
6369 0))));
6372 /* Handle storing a half-word to memory during reload by synthesizing as two
6373 byte stores. Take care not to clobber the input values until after we
6374 have moved them somewhere safe. This code assumes that if the DImode
6375 scratch in operands[2] overlaps either the input value or output address
6376 in some way, then that value must die in this insn (we absolutely need
6377 two scratch registers for some corner cases). */
6378 void
6379 arm_reload_out_hi (rtx *operands)
6381 rtx ref = operands[0];
6382 rtx outval = operands[1];
6383 rtx base, scratch;
6384 HOST_WIDE_INT offset = 0;
6386 if (GET_CODE (ref) == SUBREG)
6388 offset = SUBREG_BYTE (ref);
6389 ref = SUBREG_REG (ref);
6392 if (GET_CODE (ref) == REG)
6394 /* We have a pseudo which has been spilt onto the stack; there
6395 are two cases here: the first where there is a simple
6396 stack-slot replacement and a second where the stack-slot is
6397 out of range, or is used as a subreg. */
6398 if (reg_equiv_mem[REGNO (ref)])
6400 ref = reg_equiv_mem[REGNO (ref)];
6401 base = find_replacement (&XEXP (ref, 0));
6403 else
6404 /* The slot is out of range, or was dressed up in a SUBREG. */
6405 base = reg_equiv_address[REGNO (ref)];
6407 else
6408 base = find_replacement (&XEXP (ref, 0));
6410 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6412 /* Handle the case where the address is too complex to be offset by 1. */
6413 if (GET_CODE (base) == MINUS
6414 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6416 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6418 /* Be careful not to destroy OUTVAL. */
6419 if (reg_overlap_mentioned_p (base_plus, outval))
6421 /* Updating base_plus might destroy outval, see if we can
6422 swap the scratch and base_plus. */
6423 if (!reg_overlap_mentioned_p (scratch, outval))
6425 rtx tmp = scratch;
6426 scratch = base_plus;
6427 base_plus = tmp;
6429 else
6431 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6433 /* Be conservative and copy OUTVAL into the scratch now,
6434 this should only be necessary if outval is a subreg
6435 of something larger than a word. */
6436 /* XXX Might this clobber base? I can't see how it can,
6437 since scratch is known to overlap with OUTVAL, and
6438 must be wider than a word. */
6439 emit_insn (gen_movhi (scratch_hi, outval));
6440 outval = scratch_hi;
6444 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6445 base = base_plus;
6447 else if (GET_CODE (base) == PLUS)
6449 /* The addend must be CONST_INT, or we would have dealt with it above. */
6450 HOST_WIDE_INT hi, lo;
6452 offset += INTVAL (XEXP (base, 1));
6453 base = XEXP (base, 0);
6455 /* Rework the address into a legal sequence of insns. */
6456 /* Valid range for lo is -4095 -> 4095 */
6457 lo = (offset >= 0
6458 ? (offset & 0xfff)
6459 : -((-offset) & 0xfff));
6461 /* Corner case, if lo is the max offset then we would be out of range
6462 once we have added the additional 1 below, so bump the msb into the
6463 pre-loading insn(s). */
6464 if (lo == 4095)
6465 lo &= 0x7ff;
6467 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6468 ^ (HOST_WIDE_INT) 0x80000000)
6469 - (HOST_WIDE_INT) 0x80000000);
6471 if (hi + lo != offset)
6472 abort ();
6474 if (hi != 0)
6476 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6478 /* Be careful not to destroy OUTVAL. */
6479 if (reg_overlap_mentioned_p (base_plus, outval))
6481 /* Updating base_plus might destroy outval, see if we
6482 can swap the scratch and base_plus. */
6483 if (!reg_overlap_mentioned_p (scratch, outval))
6485 rtx tmp = scratch;
6486 scratch = base_plus;
6487 base_plus = tmp;
6489 else
6491 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6493 /* Be conservative and copy outval into scratch now,
6494 this should only be necessary if outval is a
6495 subreg of something larger than a word. */
6496 /* XXX Might this clobber base? I can't see how it
6497 can, since scratch is known to overlap with
6498 outval. */
6499 emit_insn (gen_movhi (scratch_hi, outval));
6500 outval = scratch_hi;
6504 /* Get the base address; addsi3 knows how to handle constants
6505 that require more than one insn. */
6506 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6507 base = base_plus;
6508 offset = lo;
6512 if (BYTES_BIG_ENDIAN)
6514 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6515 plus_constant (base, offset + 1)),
6516 gen_lowpart (QImode, outval)));
6517 emit_insn (gen_lshrsi3 (scratch,
6518 gen_rtx_SUBREG (SImode, outval, 0),
6519 GEN_INT (8)));
6520 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6521 gen_lowpart (QImode, scratch)));
6523 else
6525 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6526 gen_lowpart (QImode, outval)));
6527 emit_insn (gen_lshrsi3 (scratch,
6528 gen_rtx_SUBREG (SImode, outval, 0),
6529 GEN_INT (8)));
6530 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6531 plus_constant (base, offset + 1)),
6532 gen_lowpart (QImode, scratch)));
6536 /* Print a symbolic form of X to the debug file, F. */
6537 static void
6538 arm_print_value (FILE *f, rtx x)
6540 switch (GET_CODE (x))
6542 case CONST_INT:
6543 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6544 return;
6546 case CONST_DOUBLE:
6547 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6548 return;
6550 case CONST_VECTOR:
6552 int i;
6554 fprintf (f, "<");
6555 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6557 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6558 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6559 fputc (',', f);
6561 fprintf (f, ">");
6563 return;
6565 case CONST_STRING:
6566 fprintf (f, "\"%s\"", XSTR (x, 0));
6567 return;
6569 case SYMBOL_REF:
6570 fprintf (f, "`%s'", XSTR (x, 0));
6571 return;
6573 case LABEL_REF:
6574 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6575 return;
6577 case CONST:
6578 arm_print_value (f, XEXP (x, 0));
6579 return;
6581 case PLUS:
6582 arm_print_value (f, XEXP (x, 0));
6583 fprintf (f, "+");
6584 arm_print_value (f, XEXP (x, 1));
6585 return;
6587 case PC:
6588 fprintf (f, "pc");
6589 return;
6591 default:
6592 fprintf (f, "????");
6593 return;
6597 /* Routines for manipulation of the constant pool. */
6599 /* Arm instructions cannot load a large constant directly into a
6600 register; they have to come from a pc relative load. The constant
6601 must therefore be placed in the addressable range of the pc
6602 relative load. Depending on the precise pc relative load
6603 instruction the range is somewhere between 256 bytes and 4k. This
6604 means that we often have to dump a constant inside a function, and
6605 generate code to branch around it.
6607 It is important to minimize this, since the branches will slow
6608 things down and make the code larger.
6610 Normally we can hide the table after an existing unconditional
6611 branch so that there is no interruption of the flow, but in the
6612 worst case the code looks like this:
6614 ldr rn, L1
6616 b L2
6617 align
6618 L1: .long value
6622 ldr rn, L3
6624 b L4
6625 align
6626 L3: .long value
6630 We fix this by performing a scan after scheduling, which notices
6631 which instructions need to have their operands fetched from the
6632 constant table and builds the table.
6634 The algorithm starts by building a table of all the constants that
6635 need fixing up and all the natural barriers in the function (places
6636 where a constant table can be dropped without breaking the flow).
6637 For each fixup we note how far the pc-relative replacement will be
6638 able to reach and the offset of the instruction into the function.
6640 Having built the table we then group the fixes together to form
6641 tables that are as large as possible (subject to addressing
6642 constraints) and emit each table of constants after the last
6643 barrier that is within range of all the instructions in the group.
6644 If a group does not contain a barrier, then we forcibly create one
6645 by inserting a jump instruction into the flow. Once the table has
6646 been inserted, the insns are then modified to reference the
6647 relevant entry in the pool.
6649 Possible enhancements to the algorithm (not implemented) are:
6651 1) For some processors and object formats, there may be benefit in
6652 aligning the pools to the start of cache lines; this alignment
6653 would need to be taken into account when calculating addressability
6654 of a pool. */
6656 /* These typedefs are located at the start of this file, so that
6657 they can be used in the prototypes there. This comment is to
6658 remind readers of that fact so that the following structures
6659 can be understood more easily.
6661 typedef struct minipool_node Mnode;
6662 typedef struct minipool_fixup Mfix; */
6664 struct minipool_node
6666 /* Doubly linked chain of entries. */
6667 Mnode * next;
6668 Mnode * prev;
6669 /* The maximum offset into the code that this entry can be placed. While
6670 pushing fixes for forward references, all entries are sorted in order
6671 of increasing max_address. */
6672 HOST_WIDE_INT max_address;
6673 /* Similarly for an entry inserted for a backwards ref. */
6674 HOST_WIDE_INT min_address;
6675 /* The number of fixes referencing this entry. This can become zero
6676 if we "unpush" an entry. In this case we ignore the entry when we
6677 come to emit the code. */
6678 int refcount;
6679 /* The offset from the start of the minipool. */
6680 HOST_WIDE_INT offset;
6681 /* The value in table. */
6682 rtx value;
6683 /* The mode of value. */
6684 enum machine_mode mode;
6685 /* The size of the value. With iWMMXt enabled
6686 sizes > 4 also imply an alignment of 8-bytes. */
6687 int fix_size;
6690 struct minipool_fixup
6692 Mfix * next;
6693 rtx insn;
6694 HOST_WIDE_INT address;
6695 rtx * loc;
6696 enum machine_mode mode;
6697 int fix_size;
6698 rtx value;
6699 Mnode * minipool;
6700 HOST_WIDE_INT forwards;
6701 HOST_WIDE_INT backwards;
6704 /* Fixes less than a word need padding out to a word boundary. */
6705 #define MINIPOOL_FIX_SIZE(mode) \
6706 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
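/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   both evaluate to 4, while MINIPOOL_FIX_SIZE (DImode) is 8.  */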
6708 static Mnode * minipool_vector_head;
6709 static Mnode * minipool_vector_tail;
6710 static rtx minipool_vector_label;
6712 /* The linked list of all minipool fixes required for this function. */
6713 Mfix * minipool_fix_head;
6714 Mfix * minipool_fix_tail;
6715 /* The fix entry for the current minipool, once it has been placed. */
6716 Mfix * minipool_barrier;
6718 /* Determines if INSN is the start of a jump table. Returns the end
6719 of the TABLE or NULL_RTX. */
6720 static rtx
6721 is_jump_table (rtx insn)
6723 rtx table;
6725 if (GET_CODE (insn) == JUMP_INSN
6726 && JUMP_LABEL (insn) != NULL
6727 && ((table = next_real_insn (JUMP_LABEL (insn)))
6728 == next_real_insn (insn))
6729 && table != NULL
6730 && GET_CODE (table) == JUMP_INSN
6731 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6732 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6733 return table;
6735 return NULL_RTX;
6738 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6739 #define JUMP_TABLES_IN_TEXT_SECTION 0
6740 #endif
6742 static HOST_WIDE_INT
6743 get_jump_table_size (rtx insn)
6745 /* ADDR_VECs only take room if read-only data goes into the text
6746 section. */
6747 if (JUMP_TABLES_IN_TEXT_SECTION
6748 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6749 || 1
6750 #endif
6753 rtx body = PATTERN (insn);
6754 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6756 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6759 return 0;
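/* For example, an ADDR_DIFF_VEC in HImode with ten elements occupies
   GET_MODE_SIZE (HImode) * 10 == 20 bytes when jump tables live in the
   text section, and contributes nothing otherwise.  */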
6762 /* Move a minipool fix MP from its current location to before MAX_MP.
6763 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6764 constraints may need updating. */
6765 static Mnode *
6766 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6767 HOST_WIDE_INT max_address)
6769 /* This should never be true and the code below assumes these are
6770 different. */
6771 if (mp == max_mp)
6772 abort ();
6774 if (max_mp == NULL)
6776 if (max_address < mp->max_address)
6777 mp->max_address = max_address;
6779 else
6781 if (max_address > max_mp->max_address - mp->fix_size)
6782 mp->max_address = max_mp->max_address - mp->fix_size;
6783 else
6784 mp->max_address = max_address;
6786 /* Unlink MP from its current position. Since max_mp is non-null,
6787 mp->prev must be non-null. */
6788 mp->prev->next = mp->next;
6789 if (mp->next != NULL)
6790 mp->next->prev = mp->prev;
6791 else
6792 minipool_vector_tail = mp->prev;
6794 /* Re-insert it before MAX_MP. */
6795 mp->next = max_mp;
6796 mp->prev = max_mp->prev;
6797 max_mp->prev = mp;
6799 if (mp->prev != NULL)
6800 mp->prev->next = mp;
6801 else
6802 minipool_vector_head = mp;
6805 /* Save the new entry. */
6806 max_mp = mp;
6808 /* Scan over the preceding entries and adjust their addresses as
6809 required. */
6810 while (mp->prev != NULL
6811 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6813 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6814 mp = mp->prev;
6817 return max_mp;
6820 /* Add a constant to the minipool for a forward reference. Returns the
6821 node added or NULL if the constant will not fit in this pool. */
6822 static Mnode *
6823 add_minipool_forward_ref (Mfix *fix)
6825 /* If set, max_mp is the first pool_entry that has a lower
6826 constraint than the one we are trying to add. */
6827 Mnode * max_mp = NULL;
6828 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6829 Mnode * mp;
6831 /* If this fix's address is greater than the address of the first
6832 entry, then we can't put the fix in this pool. We subtract the
6833 size of the current fix to ensure that if the table is fully
6834 packed we still have enough room to insert this value by shuffling
6835 the other fixes forwards. */
6836 if (minipool_vector_head &&
6837 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6838 return NULL;
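/* A small worked example (numbers hypothetical): if the head entry has
   max_address 1000 and this fix is 4 bytes wide, then a fix at address
   996 or beyond cannot be accepted, because even shuffling every
   existing entry forwards would leave no room for it.  */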
6840 /* Scan the pool to see if a constant with the same value has
6841 already been added. While we are doing this, also note the
6842 location where we must insert the constant if it doesn't already
6843 exist. */
6844 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6846 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6847 && fix->mode == mp->mode
6848 && (GET_CODE (fix->value) != CODE_LABEL
6849 || (CODE_LABEL_NUMBER (fix->value)
6850 == CODE_LABEL_NUMBER (mp->value)))
6851 && rtx_equal_p (fix->value, mp->value))
6853 /* More than one fix references this entry. */
6854 mp->refcount++;
6855 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6858 /* Note the insertion point if necessary. */
6859 if (max_mp == NULL
6860 && mp->max_address > max_address)
6861 max_mp = mp;
6863 /* If we are inserting an 8-byte aligned quantity and
6864 we have not already found an insertion point, then
6865 make sure that all such 8-byte aligned quantities are
6866 placed at the start of the pool. */
6867 if (TARGET_REALLY_IWMMXT
6868 && max_mp == NULL
6869 && fix->fix_size == 8
6870 && mp->fix_size != 8)
6872 max_mp = mp;
6873 max_address = mp->max_address;
6877 /* The value is not currently in the minipool, so we need to create
6878 a new entry for it. If MAX_MP is NULL, the entry will be put on
6879 the end of the list since the placement is less constrained than
6880 any existing entry. Otherwise, we insert the new fix before
6881 MAX_MP and, if necessary, adjust the constraints on the other
6882 entries. */
6883 mp = xmalloc (sizeof (* mp));
6884 mp->fix_size = fix->fix_size;
6885 mp->mode = fix->mode;
6886 mp->value = fix->value;
6887 mp->refcount = 1;
6888 /* Not yet required for a backwards ref. */
6889 mp->min_address = -65536;
6891 if (max_mp == NULL)
6893 mp->max_address = max_address;
6894 mp->next = NULL;
6895 mp->prev = minipool_vector_tail;
6897 if (mp->prev == NULL)
6899 minipool_vector_head = mp;
6900 minipool_vector_label = gen_label_rtx ();
6902 else
6903 mp->prev->next = mp;
6905 minipool_vector_tail = mp;
6907 else
6909 if (max_address > max_mp->max_address - mp->fix_size)
6910 mp->max_address = max_mp->max_address - mp->fix_size;
6911 else
6912 mp->max_address = max_address;
6914 mp->next = max_mp;
6915 mp->prev = max_mp->prev;
6916 max_mp->prev = mp;
6917 if (mp->prev != NULL)
6918 mp->prev->next = mp;
6919 else
6920 minipool_vector_head = mp;
6923 /* Save the new entry. */
6924 max_mp = mp;
6926 /* Scan over the preceding entries and adjust their addresses as
6927 required. */
6928 while (mp->prev != NULL
6929 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6931 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6932 mp = mp->prev;
6935 return max_mp;
6938 static Mnode *
6939 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6940 HOST_WIDE_INT min_address)
6942 HOST_WIDE_INT offset;
6944 /* This should never be true, and the code below assumes these are
6945 different. */
6946 if (mp == min_mp)
6947 abort ();
6949 if (min_mp == NULL)
6951 if (min_address > mp->min_address)
6952 mp->min_address = min_address;
6954 else
6956 /* We will adjust this below if it is too loose. */
6957 mp->min_address = min_address;
6959 /* Unlink MP from its current position. Since min_mp is non-null,
6960 mp->next must be non-null. */
6961 mp->next->prev = mp->prev;
6962 if (mp->prev != NULL)
6963 mp->prev->next = mp->next;
6964 else
6965 minipool_vector_head = mp->next;
6967 /* Reinsert it after MIN_MP. */
6968 mp->prev = min_mp;
6969 mp->next = min_mp->next;
6970 min_mp->next = mp;
6971 if (mp->next != NULL)
6972 mp->next->prev = mp;
6973 else
6974 minipool_vector_tail = mp;
6977 min_mp = mp;
6979 offset = 0;
6980 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6982 mp->offset = offset;
6983 if (mp->refcount > 0)
6984 offset += mp->fix_size;
6986 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6987 mp->next->min_address = mp->min_address + mp->fix_size;
6990 return min_mp;
6993 /* Add a constant to the minipool for a backward reference. Returns the
6994 node added or NULL if the constant will not fit in this pool.
6996 Note that the code for insertion for a backwards reference can be
6997 somewhat confusing because the calculated offsets for each fix do
6998 not take into account the size of the pool (which is still under
6999 construction). */
7000 static Mnode *
7001 add_minipool_backward_ref (Mfix *fix)
7003 /* If set, min_mp is the last pool_entry that has a lower constraint
7004 than the one we are trying to add. */
7005 Mnode *min_mp = NULL;
7006 /* This can be negative, since it is only a constraint. */
7007 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7008 Mnode *mp;
7010 /* If we can't reach the current pool from this insn, or if we can't
7011 insert this entry at the end of the pool without pushing other
7012 fixes out of range, then we don't try. This ensures that we
7013 can't fail later on. */
7014 if (min_address >= minipool_barrier->address
7015 || (minipool_vector_tail->min_address + fix->fix_size
7016 >= minipool_barrier->address))
7017 return NULL;
7019 /* Scan the pool to see if a constant with the same value has
7020 already been added. While we are doing this, also note the
7021 location where we must insert the constant if it doesn't already
7022 exist. */
7023 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7025 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7026 && fix->mode == mp->mode
7027 && (GET_CODE (fix->value) != CODE_LABEL
7028 || (CODE_LABEL_NUMBER (fix->value)
7029 == CODE_LABEL_NUMBER (mp->value)))
7030 && rtx_equal_p (fix->value, mp->value)
7031 /* Check that there is enough slack to move this entry to the
7032 end of the table (this is conservative). */
7033 && (mp->max_address
7034 > (minipool_barrier->address
7035 + minipool_vector_tail->offset
7036 + minipool_vector_tail->fix_size)))
7038 mp->refcount++;
7039 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7042 if (min_mp != NULL)
7043 mp->min_address += fix->fix_size;
7044 else
7046 /* Note the insertion point if necessary. */
7047 if (mp->min_address < min_address)
7049 /* For now, we do not allow the insertion of 8-byte alignment
7050 requiring nodes anywhere but at the start of the pool. */
7051 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
7052 return NULL;
7053 else
7054 min_mp = mp;
7056 else if (mp->max_address
7057 < minipool_barrier->address + mp->offset + fix->fix_size)
7059 /* Inserting before this entry would push the fix beyond
7060 its maximum address (which can happen if we have
7061 re-located a forwards fix); force the new fix to come
7062 after it. */
7063 min_mp = mp;
7064 min_address = mp->min_address + fix->fix_size;
7066 /* If we are inserting an 8-byte aligned quantity and
7067 we have not already found an insertion point, then
7068 make sure that all such 8-byte aligned quantities are
7069 placed at the start of the pool. */
7070 else if (TARGET_REALLY_IWMMXT
7071 && min_mp == NULL
7072 && fix->fix_size == 8
7073 && mp->fix_size < 8)
7075 min_mp = mp;
7076 min_address = mp->min_address + fix->fix_size;
7081 /* We need to create a new entry. */
7082 mp = xmalloc (sizeof (* mp));
7083 mp->fix_size = fix->fix_size;
7084 mp->mode = fix->mode;
7085 mp->value = fix->value;
7086 mp->refcount = 1;
7087 mp->max_address = minipool_barrier->address + 65536;
7089 mp->min_address = min_address;
7091 if (min_mp == NULL)
7093 mp->prev = NULL;
7094 mp->next = minipool_vector_head;
7096 if (mp->next == NULL)
7098 minipool_vector_tail = mp;
7099 minipool_vector_label = gen_label_rtx ();
7101 else
7102 mp->next->prev = mp;
7104 minipool_vector_head = mp;
7106 else
7108 mp->next = min_mp->next;
7109 mp->prev = min_mp;
7110 min_mp->next = mp;
7112 if (mp->next != NULL)
7113 mp->next->prev = mp;
7114 else
7115 minipool_vector_tail = mp;
7118 /* Save the new entry. */
7119 min_mp = mp;
7121 if (mp->prev)
7122 mp = mp->prev;
7123 else
7124 mp->offset = 0;
7126 /* Scan over the following entries and adjust their offsets. */
7127 while (mp->next != NULL)
7129 if (mp->next->min_address < mp->min_address + mp->fix_size)
7130 mp->next->min_address = mp->min_address + mp->fix_size;
7132 if (mp->refcount)
7133 mp->next->offset = mp->offset + mp->fix_size;
7134 else
7135 mp->next->offset = mp->offset;
7137 mp = mp->next;
7140 return min_mp;
7143 static void
7144 assign_minipool_offsets (Mfix *barrier)
7146 HOST_WIDE_INT offset = 0;
7147 Mnode *mp;
7149 minipool_barrier = barrier;
7151 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7153 mp->offset = offset;
7155 if (mp->refcount > 0)
7156 offset += mp->fix_size;
7160 /* Output the literal table. */
7161 static void
7162 dump_minipool (rtx scan)
7164 Mnode * mp;
7165 Mnode * nmp;
7166 int align64 = 0;
7168 if (TARGET_REALLY_IWMMXT)
7169 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7170 if (mp->refcount > 0 && mp->fix_size == 8)
7172 align64 = 1;
7173 break;
7176 if (dump_file)
7177 fprintf (dump_file,
7178 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7179 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7181 scan = emit_label_after (gen_label_rtx (), scan);
7182 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7183 scan = emit_label_after (minipool_vector_label, scan);
7185 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7187 if (mp->refcount > 0)
7189 if (dump_file)
7191 fprintf (dump_file,
7192 ";; Offset %u, min %ld, max %ld ",
7193 (unsigned) mp->offset, (unsigned long) mp->min_address,
7194 (unsigned long) mp->max_address);
7195 arm_print_value (dump_file, mp->value);
7196 fputc ('\n', dump_file);
7199 switch (mp->fix_size)
7201 #ifdef HAVE_consttable_1
7202 case 1:
7203 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7204 break;
7206 #endif
7207 #ifdef HAVE_consttable_2
7208 case 2:
7209 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7210 break;
7212 #endif
7213 #ifdef HAVE_consttable_4
7214 case 4:
7215 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7216 break;
7218 #endif
7219 #ifdef HAVE_consttable_8
7220 case 8:
7221 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7222 break;
7224 #endif
7225 default:
7226 abort ();
7227 break;
7231 nmp = mp->next;
7232 free (mp);
7235 minipool_vector_head = minipool_vector_tail = NULL;
7236 scan = emit_insn_after (gen_consttable_end (), scan);
7237 scan = emit_barrier_after (scan);
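/* The emitted sequence looks roughly like this (the assembly shown is
   illustrative; the exact output comes from the consttable_* patterns
   in the machine description):

       .align  2             @ or 8-byte alignment when align64 is set
   .LCPn:                    @ minipool_vector_label
       .word   0x12345678    @ a 4-byte entry
       .word   some_symbol   @ another 4-byte entry  */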
7240 /* Return the cost of forcibly inserting a barrier after INSN. */
7241 static int
7242 arm_barrier_cost (rtx insn)
7244 /* Basing the location of the pool on the loop depth is preferable,
7245 but at the moment, the basic block information seems to be
7246 corrupted by this stage of the compilation. */
7247 int base_cost = 50;
7248 rtx next = next_nonnote_insn (insn);
7250 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7251 base_cost -= 20;
7253 switch (GET_CODE (insn))
7255 case CODE_LABEL:
7256 /* It will always be better to place the table before the label, rather
7257 than after it. */
7258 return 50;
7260 case INSN:
7261 case CALL_INSN:
7262 return base_cost;
7264 case JUMP_INSN:
7265 return base_cost - 10;
7267 default:
7268 return base_cost + 10;
7272 /* Find the best place in the insn stream in the range
7273 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7274 Create the barrier by inserting a jump and add a new fix entry for
7275 it. */
7276 static Mfix *
7277 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7279 HOST_WIDE_INT count = 0;
7280 rtx barrier;
7281 rtx from = fix->insn;
7282 rtx selected = from;
7283 int selected_cost;
7284 HOST_WIDE_INT selected_address;
7285 Mfix * new_fix;
7286 HOST_WIDE_INT max_count = max_address - fix->address;
7287 rtx label = gen_label_rtx ();
7289 selected_cost = arm_barrier_cost (from);
7290 selected_address = fix->address;
7292 while (from && count < max_count)
7294 rtx tmp;
7295 int new_cost;
7297 /* This code shouldn't have been called if there was a natural barrier
7298 within range. */
7299 if (GET_CODE (from) == BARRIER)
7300 abort ();
7302 /* Count the length of this insn. */
7303 count += get_attr_length (from);
7305 /* If there is a jump table, add its length. */
7306 tmp = is_jump_table (from);
7307 if (tmp != NULL)
7309 count += get_jump_table_size (tmp);
7311 /* Jump tables aren't in a basic block, so base the cost on
7312 the dispatch insn. If we select this location, we will
7313 still put the pool after the table. */
7314 new_cost = arm_barrier_cost (from);
7316 if (count < max_count && new_cost <= selected_cost)
7318 selected = tmp;
7319 selected_cost = new_cost;
7320 selected_address = fix->address + count;
7323 /* Continue after the dispatch table. */
7324 from = NEXT_INSN (tmp);
7325 continue;
7328 new_cost = arm_barrier_cost (from);
7330 if (count < max_count && new_cost <= selected_cost)
7332 selected = from;
7333 selected_cost = new_cost;
7334 selected_address = fix->address + count;
7337 from = NEXT_INSN (from);
7340 /* Create a new JUMP_INSN that branches around a barrier. */
7341 from = emit_jump_insn_after (gen_jump (label), selected);
7342 JUMP_LABEL (from) = label;
7343 barrier = emit_barrier_after (from);
7344 emit_label_after (label, barrier);
7346 /* Create a minipool barrier entry for the new barrier. */
7347 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7348 new_fix->insn = barrier;
7349 new_fix->address = selected_address;
7350 new_fix->next = fix->next;
7351 fix->next = new_fix;
7353 return new_fix;
7356 /* Record that there is a natural barrier in the insn stream at
7357 ADDRESS. */
7358 static void
7359 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7361 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7363 fix->insn = insn;
7364 fix->address = address;
7366 fix->next = NULL;
7367 if (minipool_fix_head != NULL)
7368 minipool_fix_tail->next = fix;
7369 else
7370 minipool_fix_head = fix;
7372 minipool_fix_tail = fix;
7375 /* Record INSN, which will need fixing up to load a value from the
7376 minipool. ADDRESS is the offset of the insn since the start of the
7377 function; LOC is a pointer to the part of the insn which requires
7378 fixing; VALUE is the constant that must be loaded, which is of type
7379 MODE. */
7380 static void
7381 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7382 enum machine_mode mode, rtx value)
7384 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7386 #ifdef AOF_ASSEMBLER
7387 /* PIC symbol references need to be converted into offsets into the
7388 based area. */
7389 /* XXX This shouldn't be done here. */
7390 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7391 value = aof_pic_entry (value);
7392 #endif /* AOF_ASSEMBLER */
7394 fix->insn = insn;
7395 fix->address = address;
7396 fix->loc = loc;
7397 fix->mode = mode;
7398 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7399 fix->value = value;
7400 fix->forwards = get_attr_pool_range (insn);
7401 fix->backwards = get_attr_neg_pool_range (insn);
7402 fix->minipool = NULL;
7404 /* If an insn doesn't have a range defined for it, then it isn't
7405 expecting to be reworked by this code. Better to abort now than
7406 to generate duff assembly code. */
7407 if (fix->forwards == 0 && fix->backwards == 0)
7408 abort ();
7410 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7411 So there might be an empty word before the start of the pool.
7412 Hence we reduce the forward range by 4 to allow for this
7413 possibility. */
7414 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
7415 fix->forwards -= 4;
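/* For example (the range value is hypothetical): an 8-byte fix whose
   insn has a 1024-byte pool range is treated as reaching only 1020
   bytes forwards, leaving room for the possible pad word.  */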
7417 if (dump_file)
7419 fprintf (dump_file,
7420 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7421 GET_MODE_NAME (mode),
7422 INSN_UID (insn), (unsigned long) address,
7423 -1 * (long)fix->backwards, (long)fix->forwards);
7424 arm_print_value (dump_file, fix->value);
7425 fprintf (dump_file, "\n");
7428 /* Add it to the chain of fixes. */
7429 fix->next = NULL;
7431 if (minipool_fix_head != NULL)
7432 minipool_fix_tail->next = fix;
7433 else
7434 minipool_fix_head = fix;
7436 minipool_fix_tail = fix;
7439 /* Scan INSN and note any of its operands that need fixing.
7440 If DO_PUSHES is false we do not actually push any of the fixups
7441 needed. The function returns TRUE if any fixups were needed/pushed.
7442 This is used by arm_memory_load_p() which needs to know about loads
7443 of constants that will be converted into minipool loads. */
7444 static bool
7445 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7447 bool result = false;
7448 int opno;
7450 extract_insn (insn);
7452 if (!constrain_operands (1))
7453 fatal_insn_not_found (insn);
7455 if (recog_data.n_alternatives == 0)
7456 return false;
7458 /* Fill in recog_op_alt with information about the constraints of this insn. */
7459 preprocess_constraints ();
7461 for (opno = 0; opno < recog_data.n_operands; opno++)
7463 /* Things we need to fix can only occur in inputs. */
7464 if (recog_data.operand_type[opno] != OP_IN)
7465 continue;
7467 /* If this alternative is a memory reference, then any mention
7468 of constants in this alternative is really to fool reload
7469 into allowing us to accept one there. We need to fix them up
7470 now so that we output the right code. */
7471 if (recog_op_alt[opno][which_alternative].memory_ok)
7473 rtx op = recog_data.operand[opno];
7475 if (CONSTANT_P (op))
7477 if (do_pushes)
7478 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7479 recog_data.operand_mode[opno], op);
7480 result = true;
7482 else if (GET_CODE (op) == MEM
7483 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7484 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7486 if (do_pushes)
7488 rtx cop = avoid_constant_pool_reference (op);
7490 /* Casting the address of something to a mode narrower
7491 than a word can cause avoid_constant_pool_reference()
7492 to return the pool reference itself. That's no good to
7493 us here. Let's just hope that we can use the
7494 constant pool value directly. */
7495 if (op == cop)
7496 cop = get_pool_constant (XEXP (op, 0));
7498 push_minipool_fix (insn, address,
7499 recog_data.operand_loc[opno],
7500 recog_data.operand_mode[opno], cop);
7503 result = true;
7508 return result;
7511 /* Gcc puts the pool in the wrong place for ARM, since we can only
7512 load addresses a limited distance around the pc. We do some
7513 special munging to move the constant pool values to the correct
7514 point in the code. */
7515 static void
7516 arm_reorg (void)
7518 rtx insn;
7519 HOST_WIDE_INT address = 0;
7520 Mfix * fix;
7522 minipool_fix_head = minipool_fix_tail = NULL;
7524 /* The first insn must always be a note, or the code below won't
7525 scan it properly. */
7526 insn = get_insns ();
7527 if (GET_CODE (insn) != NOTE)
7528 abort ();
7530 /* Scan all the insns and record the operands that will need fixing. */
7531 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7533 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7534 && (arm_cirrus_insn_p (insn)
7535 || GET_CODE (insn) == JUMP_INSN
7536 || arm_memory_load_p (insn)))
7537 cirrus_reorg (insn);
7539 if (GET_CODE (insn) == BARRIER)
7540 push_minipool_barrier (insn, address);
7541 else if (INSN_P (insn))
7543 rtx table;
7545 note_invalid_constants (insn, address, true);
7546 address += get_attr_length (insn);
7548 /* If the insn is a vector jump, add the size of the table
7549 and skip the table. */
7550 if ((table = is_jump_table (insn)) != NULL)
7552 address += get_jump_table_size (table);
7553 insn = table;
7558 fix = minipool_fix_head;
7560 /* Now scan the fixups and perform the required changes. */
7561 while (fix)
7563 Mfix * ftmp;
7564 Mfix * fdel;
7565 Mfix * last_added_fix;
7566 Mfix * last_barrier = NULL;
7567 Mfix * this_fix;
7569 /* Skip any further barriers before the next fix. */
7570 while (fix && GET_CODE (fix->insn) == BARRIER)
7571 fix = fix->next;
7573 /* No more fixes. */
7574 if (fix == NULL)
7575 break;
7577 last_added_fix = NULL;
7579 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7581 if (GET_CODE (ftmp->insn) == BARRIER)
7583 if (ftmp->address >= minipool_vector_head->max_address)
7584 break;
7586 last_barrier = ftmp;
7588 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7589 break;
7591 last_added_fix = ftmp; /* Keep track of the last fix added. */
7594 /* If we found a barrier, drop back to that; any fixes that we
7595 could have reached but come after the barrier will now go in
7596 the next mini-pool. */
7597 if (last_barrier != NULL)
7599 /* Reduce the refcount for those fixes that won't go into this
7600 pool after all. */
7601 for (fdel = last_barrier->next;
7602 fdel && fdel != ftmp;
7603 fdel = fdel->next)
7605 fdel->minipool->refcount--;
7606 fdel->minipool = NULL;
7609 ftmp = last_barrier;
7611 else
7613 /* ftmp is the first fix that we can't fit into this pool and
7614 there are no natural barriers that we could use. Insert a
7615 new barrier in the code somewhere between the previous
7616 fix and this one, and arrange to jump around it. */
7617 HOST_WIDE_INT max_address;
7619 /* The last item on the list of fixes must be a barrier, so
7620 we can never run off the end of the list of fixes without
7621 last_barrier being set. */
7622 if (ftmp == NULL)
7623 abort ();
7625 max_address = minipool_vector_head->max_address;
7626 /* Check that there isn't another fix that is in range that
7627 we couldn't fit into this pool because the pool was
7628 already too large: we need to put the pool before such an
7629 instruction. */
7630 if (ftmp->address < max_address)
7631 max_address = ftmp->address;
7633 last_barrier = create_fix_barrier (last_added_fix, max_address);
7636 assign_minipool_offsets (last_barrier);
7638 while (ftmp)
7640 if (GET_CODE (ftmp->insn) != BARRIER
7641 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7642 == NULL))
7643 break;
7645 ftmp = ftmp->next;
7648 /* Scan over the fixes we have identified for this pool, fixing them
7649 up and adding the constants to the pool itself. */
7650 for (this_fix = fix; this_fix && ftmp != this_fix;
7651 this_fix = this_fix->next)
7652 if (GET_CODE (this_fix->insn) != BARRIER)
7654 rtx addr
7655 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7656 minipool_vector_label),
7657 this_fix->minipool->offset);
7658 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7661 dump_minipool (last_barrier->insn);
7662 fix = ftmp;
7665 /* From now on we must synthesize any constants that we can't handle
7666 directly. This can happen if the RTL gets split during final
7667 instruction generation. */
7668 after_arm_reorg = 1;
7670 /* Free the minipool memory. */
7671 obstack_free (&minipool_obstack, minipool_startobj);
7674 /* Routines to output assembly language. */
7676 /* If the rtx is the correct value then return the string of the number.
7677 In this way we can ensure that valid double constants are generated even
7678 when cross compiling. */
7679 const char *
7680 fp_immediate_constant (rtx x)
7682 REAL_VALUE_TYPE r;
7683 int i;
7685 if (!fp_consts_inited)
7686 init_fp_table ();
7688 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7689 for (i = 0; i < 8; i++)
7690 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7691 return strings_fp[i];
7693 abort ();
7696 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7697 static const char *
7698 fp_const_from_val (REAL_VALUE_TYPE *r)
7700 int i;
7702 if (!fp_consts_inited)
7703 init_fp_table ();
7705 for (i = 0; i < 8; i++)
7706 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7707 return strings_fp[i];
7709 abort ();
7712 /* Output the operands of a LDM/STM instruction to STREAM.
7713 MASK is the ARM register set mask of which only bits 0-15 are important.
7714 REG is the base register, either the frame pointer or the stack pointer,
7715 INSTR is the possibly suffixed load or store instruction. */
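/* For example (the arguments are hypothetical): with INSTR
   "ldmfd\t%r!", REG == SP_REGNUM and MASK == 0x4030 (bits 4, 5
   and 14 set), this prints

       ldmfd   sp!, {r4, r5, lr}  */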
7716 static void
7717 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7719 int i;
7720 int not_first = FALSE;
7722 fputc ('\t', stream);
7723 asm_fprintf (stream, instr, reg);
7724 fputs (", {", stream);
7726 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7727 if (mask & (1 << i))
7729 if (not_first)
7730 fprintf (stream, ", ");
7732 asm_fprintf (stream, "%r", i);
7733 not_first = TRUE;
7736 fprintf (stream, "}");
7738 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7739 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7740 Strictly speaking the instruction would be unpredictable only if
7741 we were writing back the base register as well, but since we never
7742 want to generate an LDM type 2 instruction (register bank switching)
7743 which is what you get if the PC is not being loaded, we do not need
7744 to check for writeback. */
7745 if (! TARGET_APCS_32
7746 && ((mask & (1 << PC_REGNUM)) != 0))
7747 fprintf (stream, "^");
7749 fprintf (stream, "\n");
7753 /* Output the operands of a FLDM/FSTM instruction to STREAM.
7754 REG is the base register,
7755 INSTR is the possibly suffixed load or store instruction.
7756 FMT specifies how to print the register name.
7757 START and COUNT specify the register range. */
7759 static void
7760 vfp_print_multi (FILE *stream, const char *instr, int reg,
7761 const char * fmt, int start, int count)
7763 int i;
7765 fputc ('\t', stream);
7766 asm_fprintf (stream, instr, reg);
7767 fputs (", {", stream);
7769 for (i = start; i < start + count; i++)
7771 if (i > start)
7772 fputs (", ", stream);
7773 asm_fprintf (stream, fmt, i);
7775 fputs ("}\n", stream);
7779 /* Output the assembly for a store multiple. */
7781 const char *
7782 vfp_output_fstmx (rtx * operands)
7784 char pattern[100];
7785 int p;
7786 int base;
7787 int i;
7789 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7790 p = strlen (pattern);
7792 if (GET_CODE (operands[1]) != REG)
7793 abort ();
7795 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7796 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7798 p += sprintf (&pattern[p], ", d%d", base + i);
7800 strcpy (&pattern[p], "}");
7802 output_asm_insn (pattern, operands);
7803 return "";
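/* For example (register numbers hypothetical): if operands[1] is the
   VFP register pair starting at d8 and the PARALLEL in operands[2] has
   three elements, the pattern built above is
   "fstmfdx\t%m0!, {%P1, d9, d10}", which prints as something like

       fstmfdx sp!, {d8, d9, d10}  */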
7807 /* Emit RTL to save block of VFP register pairs to the stack. */
7809 static rtx
7810 vfp_emit_fstmx (int base_reg, int count)
7812 rtx par;
7813 rtx dwarf;
7814 rtx tmp, reg;
7815 int i;
7817 /* ??? The frame layout is implementation defined. We describe
7818 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7819 We really need some way of representing the whole block so that the
7820 unwinder can figure it out at runtime. */
7821 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7822 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7824 reg = gen_rtx_REG (DFmode, base_reg);
7825 base_reg += 2;
7827 XVECEXP (par, 0, 0)
7828 = gen_rtx_SET (VOIDmode,
7829 gen_rtx_MEM (BLKmode,
7830 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7831 gen_rtx_UNSPEC (BLKmode,
7832 gen_rtvec (1, reg),
7833 UNSPEC_PUSH_MULT));
7835 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7836 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7837 GEN_INT (-(count * 8 + 4))));
7838 RTX_FRAME_RELATED_P (tmp) = 1;
7839 XVECEXP (dwarf, 0, 0) = tmp;
7841 tmp = gen_rtx_SET (VOIDmode,
7842 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7843 reg);
7844 RTX_FRAME_RELATED_P (tmp) = 1;
7845 XVECEXP (dwarf, 0, 1) = tmp;
7847 for (i = 1; i < count; i++)
7849 reg = gen_rtx_REG (DFmode, base_reg);
7850 base_reg += 2;
7851 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7853 tmp = gen_rtx_SET (VOIDmode,
7854 gen_rtx_MEM (DFmode,
7855 gen_rtx_PLUS (SImode,
7856 stack_pointer_rtx,
7857 GEN_INT (i * 8))),
7858 reg);
7859 RTX_FRAME_RELATED_P (tmp) = 1;
7860 XVECEXP (dwarf, 0, i + 1) = tmp;
7863 par = emit_insn (par);
7864 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7865 REG_NOTES (par));
7866 return par;
7870 /* Output a 'call' insn. */
7871 const char *
7872 output_call (rtx *operands)
7874 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7876 if (REGNO (operands[0]) == LR_REGNUM)
7878 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7879 output_asm_insn ("mov%?\t%0, %|lr", operands);
7882 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7884 if (TARGET_INTERWORK)
7885 output_asm_insn ("bx%?\t%0", operands);
7886 else
7887 output_asm_insn ("mov%?\t%|pc, %0", operands);
7889 return "";
7892 /* Output a 'call' insn that is a reference in memory. */
7893 const char *
7894 output_call_mem (rtx *operands)
7896 if (TARGET_INTERWORK)
7898 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7899 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7900 output_asm_insn ("bx%?\t%|ip", operands);
7902 else if (regno_use_in (LR_REGNUM, operands[0]))
7904 /* LR is used in the memory address. We load the address in the
7905 first instruction. It's safe to use IP as the target of the
7906 load since the call will kill it anyway. */
7907 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7908 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7909 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7911 else
7913 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7914 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7917 return "";
7921 /* Output a move from arm registers to an fpa register.
7922 OPERANDS[0] is an fpa register.
7923 OPERANDS[1] is the first register of an arm register pair. */
7924 const char *
7925 output_mov_long_double_fpa_from_arm (rtx *operands)
7927 int arm_reg0 = REGNO (operands[1]);
7928 rtx ops[3];
7930 if (arm_reg0 == IP_REGNUM)
7931 abort ();
7933 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7934 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7935 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7937 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7938 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7940 return "";
7943 /* Output a move from an fpa register to arm registers.
7944 OPERANDS[0] is the first register of an arm register pair.
7945 OPERANDS[1] is an fpa register. */
7946 const char *
7947 output_mov_long_double_arm_from_fpa (rtx *operands)
7949 int arm_reg0 = REGNO (operands[0]);
7950 rtx ops[3];
7952 if (arm_reg0 == IP_REGNUM)
7953 abort ();
7955 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7956 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7957 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7959 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7960 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7961 return "";
7964 /* Output a move from arm registers to arm registers of a long double
7965 OPERANDS[0] is the destination.
7966 OPERANDS[1] is the source. */
7967 const char *
7968 output_mov_long_double_arm_from_arm (rtx *operands)
7970 /* We have to be careful here because the two might overlap. */
7971 int dest_start = REGNO (operands[0]);
7972 int src_start = REGNO (operands[1]);
7973 rtx ops[2];
7974 int i;
7976 if (dest_start < src_start)
7978 for (i = 0; i < 3; i++)
7980 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7981 ops[1] = gen_rtx_REG (SImode, src_start + i);
7982 output_asm_insn ("mov%?\t%0, %1", ops);
7985 else
7987 for (i = 2; i >= 0; i--)
7989 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7990 ops[1] = gen_rtx_REG (SImode, src_start + i);
7991 output_asm_insn ("mov%?\t%0, %1", ops);
7995 return "";
7999 /* Output a move from arm registers to an fpa register.
8000 OPERANDS[0] is an fpa register.
8001 OPERANDS[1] is the first register of an arm register pair. */
8002 const char *
8003 output_mov_double_fpa_from_arm (rtx *operands)
8005 int arm_reg0 = REGNO (operands[1]);
8006 rtx ops[2];
8008 if (arm_reg0 == IP_REGNUM)
8009 abort ();
8011 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8012 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8013 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
8014 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
8015 return "";
8018 /* Output a move from an fpa register to arm registers.
8019 OPERANDS[0] is the first register of an arm register pair.
8020 OPERANDS[1] is an fpa register. */
8021 const char *
8022 output_mov_double_arm_from_fpa (rtx *operands)
8024 int arm_reg0 = REGNO (operands[0]);
8025 rtx ops[2];
8027 if (arm_reg0 == IP_REGNUM)
8028 abort ();
8030 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8031 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8032 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8033 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8034 return "";
8037 /* Output a move between double words.
8038 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8039 or MEM<-REG and all MEMs must be offsettable addresses. */
8040 const char *
8041 output_move_double (rtx *operands)
8043 enum rtx_code code0 = GET_CODE (operands[0]);
8044 enum rtx_code code1 = GET_CODE (operands[1]);
8045 rtx otherops[3];
8047 if (code0 == REG)
8049 int reg0 = REGNO (operands[0]);
8051 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8053 if (code1 == REG)
8055 int reg1 = REGNO (operands[1]);
8056 if (reg1 == IP_REGNUM)
8057 abort ();
8059 /* Ensure the second source is not overwritten. */
8060 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8061 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8062 else
8063 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8065 else if (code1 == CONST_VECTOR)
8067 HOST_WIDE_INT hint = 0;
8069 switch (GET_MODE (operands[1]))
8071 case V2SImode:
8072 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8073 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8074 break;
8076 case V4HImode:
8077 if (BYTES_BIG_ENDIAN)
8079 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8080 hint <<= 16;
8081 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8083 else
8085 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8086 hint <<= 16;
8087 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8090 otherops[1] = GEN_INT (hint);
8091 hint = 0;
8093 if (BYTES_BIG_ENDIAN)
8095 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8096 hint <<= 16;
8097 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8099 else
8101 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8102 hint <<= 16;
8103 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8106 operands[1] = GEN_INT (hint);
8107 break;
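/* Worked example (illustrative, not from the original source): on a
   little-endian target the V4HImode constant {0x1111, 0x2222, 0x3333,
   0x4444} (elements 0-3) packs into otherops[1] = 0x44443333 and
   operands[1] = 0x22221111, which output_mov_immediate below then
   materializes one word at a time. */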
8109 case V8QImode:
8110 if (BYTES_BIG_ENDIAN)
8112 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8113 hint <<= 8;
8114 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8115 hint <<= 8;
8116 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8117 hint <<= 8;
8118 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8120 else
8122 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8123 hint <<= 8;
8124 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8125 hint <<= 8;
8126 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8127 hint <<= 8;
8128 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8131 otherops[1] = GEN_INT (hint);
8132 hint = 0;
8134 if (BYTES_BIG_ENDIAN)
8136 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8137 hint <<= 8;
8138 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8139 hint <<= 8;
8140 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8141 hint <<= 8;
8142 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8144 else
8146 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8147 hint <<= 8;
8148 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8149 hint <<= 8;
8150 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8151 hint <<= 8;
8152 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8155 operands[1] = GEN_INT (hint);
8156 break;
8158 default:
8159 abort ();
8161 output_mov_immediate (operands);
8162 output_mov_immediate (otherops);
8164 else if (code1 == CONST_DOUBLE)
8166 if (GET_MODE (operands[1]) == DFmode)
8168 REAL_VALUE_TYPE r;
8169 long l[2];
8171 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8172 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8173 otherops[1] = GEN_INT (l[1]);
8174 operands[1] = GEN_INT (l[0]);
8176 else if (GET_MODE (operands[1]) != VOIDmode)
8177 abort ();
8178 else if (WORDS_BIG_ENDIAN)
8180 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8181 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8183 else
8185 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8186 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8189 output_mov_immediate (operands);
8190 output_mov_immediate (otherops);
8192 else if (code1 == CONST_INT)
8194 #if HOST_BITS_PER_WIDE_INT > 32
8195 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8196 what the upper word is. */
8197 if (WORDS_BIG_ENDIAN)
8199 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8200 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8202 else
8204 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8205 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8207 #else
8208 /* Sign extend the intval into the high-order word. */
8209 if (WORDS_BIG_ENDIAN)
8211 otherops[1] = operands[1];
8212 operands[1] = (INTVAL (operands[1]) < 0
8213 ? constm1_rtx : const0_rtx);
8215 else
8216 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8217 #endif
8218 output_mov_immediate (otherops);
8219 output_mov_immediate (operands);
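/* For example (illustrative): with a 32-bit HOST_WIDE_INT on a
   little-endian target, a DImode constant of -5 leaves operands[1]
   holding -5 and sets otherops[1] to constm1_rtx, so the low word
   carries the value and the high word its sign extension. */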
8221 else if (code1 == MEM)
8223 switch (GET_CODE (XEXP (operands[1], 0)))
8225 case REG:
8226 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8227 break;
8229 case PRE_INC:
8230 abort (); /* Should never happen now. */
8231 break;
8233 case PRE_DEC:
8234 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8235 break;
8237 case POST_INC:
8238 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8239 break;
8241 case POST_DEC:
8242 abort (); /* Should never happen now. */
8243 break;
8245 case LABEL_REF:
8246 case CONST:
8247 output_asm_insn ("adr%?\t%0, %1", operands);
8248 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8249 break;
8251 default:
8252 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8253 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8255 otherops[0] = operands[0];
8256 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8257 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8259 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8261 if (GET_CODE (otherops[2]) == CONST_INT)
8263 switch ((int) INTVAL (otherops[2]))
8265 case -8:
8266 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8267 return "";
8268 case -4:
8269 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8270 return "";
8271 case 4:
8272 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8273 return "";
8276 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8277 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8278 else
8279 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8281 else
8282 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8284 else
8285 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8287 return "ldm%?ia\t%0, %M0";
8289 else
8291 otherops[1] = adjust_address (operands[1], SImode, 4);
8292 /* Take care of overlapping base/data reg. */
8293 if (reg_mentioned_p (operands[0], operands[1]))
8295 output_asm_insn ("ldr%?\t%0, %1", otherops);
8296 output_asm_insn ("ldr%?\t%0, %1", operands);
8298 else
8300 output_asm_insn ("ldr%?\t%0, %1", operands);
8301 output_asm_insn ("ldr%?\t%0, %1", otherops);
8306 else
8307 abort (); /* Constraints should prevent this. */
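/* Illustrative example (not from the original source): a DImode load
   from (mem (plus r2 (const_int 4))) into r4 hits the INTVAL == 4
   case above and becomes the single instruction
   ldmib r2, {r4, r5}
   whereas an offset such as #16 falls through to the add path and is
   emitted as "add r4, r2, #16" followed by "ldmia r4, {r4, r5}". */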
8309 else if (code0 == MEM && code1 == REG)
8311 if (REGNO (operands[1]) == IP_REGNUM)
8312 abort ();
8314 switch (GET_CODE (XEXP (operands[0], 0)))
8316 case REG:
8317 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8318 break;
8320 case PRE_INC:
8321 abort (); /* Should never happen now. */
8322 break;
8324 case PRE_DEC:
8325 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8326 break;
8328 case POST_INC:
8329 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8330 break;
8332 case POST_DEC:
8333 abort (); /* Should never happen now. */
8334 break;
8336 case PLUS:
8337 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
8339 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8341 case -8:
8342 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8343 return "";
8345 case -4:
8346 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8347 return "";
8349 case 4:
8350 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8351 return "";
8354 /* Fall through */
8356 default:
8357 otherops[0] = adjust_address (operands[0], SImode, 4);
8358 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8359 output_asm_insn ("str%?\t%1, %0", operands);
8360 output_asm_insn ("str%?\t%1, %0", otherops);
8363 else
8364 /* Constraints should prevent this. */
8365 abort ();
8367 return "";
8371 /* Output an arbitrary MOV reg, #n.
8372 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8373 const char *
8374 output_mov_immediate (rtx *operands)
8376 HOST_WIDE_INT n = INTVAL (operands[1]);
8378 /* Try to use one MOV. */
8379 if (const_ok_for_arm (n))
8380 output_asm_insn ("mov%?\t%0, %1", operands);
8382 /* Try to use one MVN. */
8383 else if (const_ok_for_arm (~n))
8385 operands[1] = GEN_INT (~n);
8386 output_asm_insn ("mvn%?\t%0, %1", operands);
8388 else
8390 int n_ones = 0;
8391 int i;
8393 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8394 for (i = 0; i < 32; i++)
8395 if (n & 1 << i)
8396 n_ones++;
8398 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8399 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8400 else
8401 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8404 return "";
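/* Illustrative examples (not from the original source): n = 255 needs
   one MOV; n = 0xffffff00 has ~n == 255 and so becomes "mvn r0, #255";
   n = 0x00ff00ff has only 16 set bits, so it is built from a MOV of
   the low byte plus an ORR of the 0x00ff0000 chunk, emitted by
   output_multi_immediate below. */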
8407 /* Output an ADD r, s, #n where n may be too big for one instruction.
8408 If adding zero to one register, output nothing. */
8409 const char *
8410 output_add_immediate (rtx *operands)
8412 HOST_WIDE_INT n = INTVAL (operands[2]);
8414 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8416 if (n < 0)
8417 output_multi_immediate (operands,
8418 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8419 -n);
8420 else
8421 output_multi_immediate (operands,
8422 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8426 return "";
8429 /* Output a multiple immediate operation.
8430 OPERANDS is the vector of operands referred to in the output patterns.
8431 INSTR1 is the output pattern to use for the first constant.
8432 INSTR2 is the output pattern to use for subsequent constants.
8433 IMMED_OP is the index of the constant slot in OPERANDS.
8434 N is the constant value. */
8435 static const char *
8436 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8437 int immed_op, HOST_WIDE_INT n)
8439 #if HOST_BITS_PER_WIDE_INT > 32
8440 n &= 0xffffffff;
8441 #endif
8443 if (n == 0)
8445 /* Quick and easy output. */
8446 operands[immed_op] = const0_rtx;
8447 output_asm_insn (instr1, operands);
8449 else
8451 int i;
8452 const char * instr = instr1;
8454 /* Note that n is never zero here (which would give no output). */
8455 for (i = 0; i < 32; i += 2)
8457 if (n & (3 << i))
8459 operands[immed_op] = GEN_INT (n & (255 << i));
8460 output_asm_insn (instr, operands);
8461 instr = instr2;
8462 i += 6;
8467 return "";
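/* Worked example (illustrative): N = 0x00ff00ff with INSTR1
   "mov%?\t%0, %1" and INSTR2 "orr%?\t%0, %0, %1".  The scan finds set
   bits at i = 0 and i = 16, so the chunks 0x000000ff and 0x00ff0000
   are emitted as one MOV followed by one ORR.  Every chunk is an 8-bit
   value at an even bit position, which is exactly the set of valid
   ARM immediate operands. */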
8470 /* Return the appropriate ARM instruction for the operation code.
8471 The returned result should not be overwritten. OP is the rtx of the
8472 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8473 was shifted. */
8474 const char *
8475 arithmetic_instr (rtx op, int shift_first_arg)
8477 switch (GET_CODE (op))
8479 case PLUS:
8480 return "add";
8482 case MINUS:
8483 return shift_first_arg ? "rsb" : "sub";
8485 case IOR:
8486 return "orr";
8488 case XOR:
8489 return "eor";
8491 case AND:
8492 return "and";
8494 default:
8495 abort ();
8499 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8500 for the operation code. The returned result should not be overwritten.
8501 OP is the rtx code of the shift.
8502 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8503 constant amount if the shift is by a constant. */
8504 static const char *
8505 shift_op (rtx op, HOST_WIDE_INT *amountp)
8507 const char * mnem;
8508 enum rtx_code code = GET_CODE (op);
8510 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8511 *amountp = -1;
8512 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8513 *amountp = INTVAL (XEXP (op, 1));
8514 else
8515 abort ();
8517 switch (code)
8519 case ASHIFT:
8520 mnem = "asl";
8521 break;
8523 case ASHIFTRT:
8524 mnem = "asr";
8525 break;
8527 case LSHIFTRT:
8528 mnem = "lsr";
8529 break;
8531 case ROTATERT:
8532 mnem = "ror";
8533 break;
8535 case MULT:
8536 /* We never have to worry about the amount being other than a
8537 power of 2, since this case can never be reloaded from a reg. */
8538 if (*amountp != -1)
8539 *amountp = int_log2 (*amountp);
8540 else
8541 abort ();
8542 return "asl";
8544 default:
8545 abort ();
8548 if (*amountp != -1)
8550 /* This is not 100% correct, but follows from the desire to merge
8551 multiplication by a power of 2 with the recognizer for a
8552 shift. >=32 is not a valid shift for "asl", so we must try and
8553 output a shift that produces the correct arithmetical result.
8554 Using lsr #32 is identical except for the fact that the carry bit
8555 is not set correctly if we set the flags; but we never use the
8556 carry bit from such an operation, so we can ignore that. */
8557 if (code == ROTATERT)
8558 /* Rotate is just modulo 32. */
8559 *amountp &= 31;
8560 else if (*amountp != (*amountp & 31))
8562 if (code == ASHIFT)
8563 mnem = "lsr";
8564 *amountp = 32;
8567 /* Shifts of 0 are no-ops. */
8568 if (*amountp == 0)
8569 return NULL;
8572 return mnem;
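/* Illustrative examples (not from the original source):
   (ashiftrt x (const_int 3))  -> "asr" with *amountp == 3
   (mult x (const_int 8))      -> "asl" with *amountp == 3
   (rotatert x (const_int 33)) -> "ror" with *amountp == 1
   (ashift x (const_int 34))   -> "lsr" with *amountp == 32
   while a constant shift of zero makes the function return NULL. */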
8575 /* Obtain the shift count (log base 2) from POWER, which must be a power of two. */
8577 static HOST_WIDE_INT
8578 int_log2 (HOST_WIDE_INT power)
8580 HOST_WIDE_INT shift = 0;
8582 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8584 if (shift > 31)
8585 abort ();
8586 shift++;
8589 return shift;
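/* For example (illustrative), int_log2 (1) == 0 and int_log2 (8) == 3;
   the abort above fires if no set bit is found in the low 32 bits. */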
8592 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8593 /bin/as is horribly restrictive. */
8594 #define MAX_ASCII_LEN 51
8596 void
8597 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8599 int i;
8600 int len_so_far = 0;
8602 fputs ("\t.ascii\t\"", stream);
8604 for (i = 0; i < len; i++)
8606 int c = p[i];
8608 if (len_so_far >= MAX_ASCII_LEN)
8610 fputs ("\"\n\t.ascii\t\"", stream);
8611 len_so_far = 0;
8614 switch (c)
8616 case TARGET_TAB:
8617 fputs ("\\t", stream);
8618 len_so_far += 2;
8619 break;
8621 case TARGET_FF:
8622 fputs ("\\f", stream);
8623 len_so_far += 2;
8624 break;
8626 case TARGET_BS:
8627 fputs ("\\b", stream);
8628 len_so_far += 2;
8629 break;
8631 case TARGET_CR:
8632 fputs ("\\r", stream);
8633 len_so_far += 2;
8634 break;
8636 case TARGET_NEWLINE:
8637 fputs ("\\n", stream);
8638 c = p [i + 1];
8639 if ((c >= ' ' && c <= '~')
8640 || c == TARGET_TAB)
8641 /* This is a good place for a line break. */
8642 len_so_far = MAX_ASCII_LEN;
8643 else
8644 len_so_far += 2;
8645 break;
8647 case '\"':
8648 case '\\':
8649 putc ('\\', stream);
8650 len_so_far++;
8651 /* Drop through. */
8653 default:
8654 if (c >= ' ' && c <= '~')
8656 putc (c, stream);
8657 len_so_far++;
8659 else
8661 fprintf (stream, "\\%03o", c);
8662 len_so_far += 4;
8664 break;
8668 fputs ("\"\n", stream);
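/* Illustrative example (not from the original source): the four input
   bytes 'H', 'i', '"', '\n' are emitted as
   .ascii "Hi\"\n"
   with a fresh .ascii directive started whenever MAX_ASCII_LEN
   characters have accumulated on the current line. */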
8671 /* Compute the register save mask for registers 0 through 12
8672 inclusive. This code is used by both arm_compute_save_reg_mask
8673 and arm_compute_initial_elimination_offset. */
8674 static unsigned long
8675 arm_compute_save_reg0_reg12_mask (void)
8677 unsigned long func_type = arm_current_func_type ();
8678 unsigned int save_reg_mask = 0;
8679 unsigned int reg;
8681 if (IS_INTERRUPT (func_type))
8683 unsigned int max_reg;
8684 /* Interrupt functions must not corrupt any registers,
8685 even call clobbered ones. If this is a leaf function
8686 we can just examine the registers used by the RTL, but
8687 otherwise we have to assume that whatever function is
8688 called might clobber anything, and so we have to save
8689 all the call-clobbered registers as well. */
8690 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8691 /* FIQ handlers have registers r8 - r12 banked, so
8692 we only need to check r0 - r7. Normal ISRs only
8693 bank r14 and r15, so we must check up to r12.
8694 r13 is the stack pointer which is always preserved,
8695 so we do not need to consider it here. */
8696 max_reg = 7;
8697 else
8698 max_reg = 12;
8700 for (reg = 0; reg <= max_reg; reg++)
8701 if (regs_ever_live[reg]
8702 || (! current_function_is_leaf && call_used_regs [reg]))
8703 save_reg_mask |= (1 << reg);
8705 else
8707 /* In the normal case we only need to save those registers
8708 which are call saved and which are used by this function. */
8709 for (reg = 0; reg <= 10; reg++)
8710 if (regs_ever_live[reg] && ! call_used_regs [reg])
8711 save_reg_mask |= (1 << reg);
8713 /* Handle the frame pointer as a special case. */
8714 if (! TARGET_APCS_FRAME
8715 && ! frame_pointer_needed
8716 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8717 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8718 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8720 /* If we aren't loading the PIC register,
8721 don't stack it even though it may be live. */
8722 if (flag_pic
8723 && ! TARGET_SINGLE_PIC_BASE
8724 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8725 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8728 return save_reg_mask;
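/* Illustrative example (not from the original source): a normal
   (non-interrupt) function that uses only the call-saved registers r4
   and r7 gets a mask of (1 << 4) | (1 << 7) == 0x90 from the loop
   above. */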
8731 /* Compute a bit mask of which registers need to be
8732 saved on the stack for the current function. */
8734 static unsigned long
8735 arm_compute_save_reg_mask (void)
8737 unsigned int save_reg_mask = 0;
8738 unsigned long func_type = arm_current_func_type ();
8740 if (IS_NAKED (func_type))
8741 /* This should never really happen. */
8742 return 0;
8744 /* If we are creating a stack frame, then we must save the frame pointer,
8745 IP (which will hold the old stack pointer), LR and the PC. */
8746 if (frame_pointer_needed)
8747 save_reg_mask |=
8748 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8749 | (1 << IP_REGNUM)
8750 | (1 << LR_REGNUM)
8751 | (1 << PC_REGNUM);
8753 /* Volatile functions do not return, so there
8754 is no need to save any other registers. */
8755 if (IS_VOLATILE (func_type))
8756 return save_reg_mask;
8758 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8760 /* Decide if we need to save the link register.
8761 Interrupt routines have their own banked link register,
8762 so they never need to save it.
8763 Otherwise if we do not use the link register we do not need to save
8764 it. If we are pushing other registers onto the stack however, we
8765 can save an instruction in the epilogue by pushing the link register
8766 now and then popping it back into the PC. This incurs extra memory
8767 accesses though, so we only do it when optimizing for size, and only
8768 if we know that we will not need a fancy return sequence. */
8769 if (regs_ever_live [LR_REGNUM]
8770 || (save_reg_mask
8771 && optimize_size
8772 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8773 save_reg_mask |= 1 << LR_REGNUM;
8775 if (cfun->machine->lr_save_eliminated)
8776 save_reg_mask &= ~ (1 << LR_REGNUM);
8778 if (TARGET_REALLY_IWMMXT
8779 && ((bit_count (save_reg_mask)
8780 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8782 unsigned int reg;
8784 /* The total number of registers that are going to be pushed
8785 onto the stack is odd. We need to ensure that the stack
8786 is 64-bit aligned before we start to save iWMMXt registers,
8787 and also before we start to create locals. (A local variable
8788 might be a double or long long which we will load/store using
8789 an iWMMXt instruction). Therefore we need to push another
8790 ARM register, so that the stack will be 64-bit aligned. We
8791 try to avoid using the arg registers (r0 - r3) as they might be
8792 used to pass values in a tail call. */
8793 for (reg = 4; reg <= 12; reg++)
8794 if ((save_reg_mask & (1 << reg)) == 0)
8795 break;
8797 if (reg <= 12)
8798 save_reg_mask |= (1 << reg);
8799 else
8801 cfun->machine->sibcall_blocked = 1;
8802 save_reg_mask |= (1 << 3);
8806 return save_reg_mask;
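/* Illustrative example (not from the original source): on an iWMMXt
   target a function saving r4, r5 and LR would push an odd number of
   words, so the loop above adds r6 to the mask purely as padding to
   keep the stack 64-bit aligned. */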
8809 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8810 everything bar the final return instruction. */
8811 const char *
8812 output_return_instruction (rtx operand, int really_return, int reverse)
8814 char conditional[10];
8815 char instr[100];
8816 int reg;
8817 unsigned long live_regs_mask;
8818 unsigned long func_type;
8820 func_type = arm_current_func_type ();
8822 if (IS_NAKED (func_type))
8823 return "";
8825 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8827 /* If this function was declared non-returning, and we have
8828 found a tail call, then we have to trust that the called
8829 function won't return. */
8830 if (really_return)
8832 rtx ops[2];
8834 /* Otherwise, trap an attempted return by aborting. */
8835 ops[0] = operand;
8836 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8837 : "abort");
8838 assemble_external_libcall (ops[1]);
8839 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8842 return "";
8845 if (current_function_calls_alloca && !really_return)
8846 abort ();
8848 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8850 return_used_this_function = 1;
8852 live_regs_mask = arm_compute_save_reg_mask ();
8854 if (live_regs_mask)
8856 const char * return_reg;
8858 /* If we do not have any special requirements for function exit
8859 (eg interworking, or ISR) then we can load the return address
8860 directly into the PC. Otherwise we must load it into LR. */
8861 if (really_return
8862 && ! TARGET_INTERWORK)
8863 return_reg = reg_names[PC_REGNUM];
8864 else
8865 return_reg = reg_names[LR_REGNUM];
8867 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8869 /* There are three possible reasons for the IP register
8870 being saved. 1) a stack frame was created, in which case
8871 IP contains the old stack pointer, or 2) an ISR routine
8872 corrupted it, or 3) it was saved to align the stack on
8873 iWMMXt. In case 1, restore IP into SP, otherwise just
8874 restore IP. */
8875 if (frame_pointer_needed)
8877 live_regs_mask &= ~ (1 << IP_REGNUM);
8878 live_regs_mask |= (1 << SP_REGNUM);
8880 else
8882 if (! IS_INTERRUPT (func_type)
8883 && ! TARGET_REALLY_IWMMXT)
8884 abort ();
8888 /* On some ARM architectures it is faster to use LDR rather than
8889 LDM to load a single register. On other architectures, the
8890 cost is the same. In 26 bit mode, or for exception handlers,
8891 we have to use LDM to load the PC so that the CPSR is also
8892 restored. */
8893 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8895 if (live_regs_mask == (unsigned int)(1 << reg))
8896 break;
8898 if (reg <= LAST_ARM_REGNUM
8899 && (reg != LR_REGNUM
8900 || ! really_return
8901 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
8903 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8904 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8906 else
8908 char *p;
8909 int first = 1;
8911 /* Generate the load multiple instruction to restore the
8912 registers. Note we can get here, even if
8913 frame_pointer_needed is true, but only if sp already
8914 points to the base of the saved core registers. */
8915 if (live_regs_mask & (1 << SP_REGNUM))
8917 unsigned HOST_WIDE_INT stack_adjust =
8918 arm_get_frame_size () + current_function_outgoing_args_size;
8920 if (stack_adjust != 0 && stack_adjust != 4)
8921 abort ();
8923 if (stack_adjust && arm_arch5)
8924 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8925 else
8927 /* If we can't use ldmib (SA110 bug), then try to pop r3
8928 instead. */
8929 if (stack_adjust)
8930 live_regs_mask |= 1 << 3;
8931 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8934 else
8935 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8937 p = instr + strlen (instr);
8939 for (reg = 0; reg <= SP_REGNUM; reg++)
8940 if (live_regs_mask & (1 << reg))
8942 int l = strlen (reg_names[reg]);
8944 if (first)
8945 first = 0;
8946 else
8948 memcpy (p, ", ", 2);
8949 p += 2;
8952 memcpy (p, "%|", 2);
8953 memcpy (p + 2, reg_names[reg], l);
8954 p += l + 2;
8957 if (live_regs_mask & (1 << LR_REGNUM))
8959 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8960 /* Decide if we need to add the ^ symbol to the end of the
8961 register list. This causes the saved condition codes
8962 register to be copied into the current condition codes
8963 register. We do the copy if we are conforming to the 32-bit
8964 ABI and this is an interrupt function, or if we are
8965 conforming to the 26-bit ABI. There is a special case for
8966 the 26-bit ABI however, which is if we are writing back the
8967 stack pointer but not loading the PC. In this case adding
8968 the ^ symbol would create a type 2 LDM instruction, where
8969 writeback is UNPREDICTABLE. We are safe in leaving the ^
8970 character off in this case however, since the actual return
8971 instruction will be a MOVS which will restore the CPSR. */
8972 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
8973 || (! TARGET_APCS_32 && really_return))
8974 strcat (p, "^");
8976 else
8977 strcpy (p, "}");
8980 output_asm_insn (instr, & operand);
8982 /* See if we need to generate an extra instruction to
8983 perform the actual function return. */
8984 if (really_return
8985 && func_type != ARM_FT_INTERWORKED
8986 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8988 /* The return has already been handled
8989 by loading the LR into the PC. */
8990 really_return = 0;
8994 if (really_return)
8996 switch ((int) ARM_FUNC_TYPE (func_type))
8998 case ARM_FT_ISR:
8999 case ARM_FT_FIQ:
9000 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
9001 break;
9003 case ARM_FT_INTERWORKED:
9004 sprintf (instr, "bx%s\t%%|lr", conditional);
9005 break;
9007 case ARM_FT_EXCEPTION:
9008 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
9009 break;
9011 default:
9012 /* ARMv5 implementations always provide BX, so interworking
9013 is the default unless APCS-26 is in use. */
9014 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
9015 sprintf (instr, "bx%s\t%%|lr", conditional);
9016 else
9017 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
9018 conditional, TARGET_APCS_32 ? "" : "s");
9019 break;
9022 output_asm_insn (instr, & operand);
9025 return "";
9028 /* Write the function name into the code section, directly preceding
9029 the function prologue.
9031 Code similar to the following will be output:
9032 t0
9033 .ascii "arm_poke_function_name", 0
9034 .align
9035 t1
9036 .word 0xff000000 + (t1 - t0)
9037 arm_poke_function_name
9038 mov ip, sp
9039 stmfd sp!, {fp, ip, lr, pc}
9040 sub fp, ip, #4
9042 When performing a stack backtrace, code can inspect the value
9043 of 'pc' stored at 'fp' + 0. If the trace function then looks
9044 at location pc - 12 and the top 8 bits are set, then we know
9045 that there is a function name embedded immediately preceding this
9046 location, whose length is (pc[-3] & ~0xff000000).
9048 We assume that pc is declared as a pointer to an unsigned long.
9050 It is of no benefit to output the function name if we are assembling
9051 a leaf function. These function types will not contain a stack
9052 backtrace structure, therefore it is not possible to determine the
9053 function name. */
9054 void
9055 arm_poke_function_name (FILE *stream, const char *name)
9057 unsigned long alignlength;
9058 unsigned long length;
9059 rtx x;
9061 length = strlen (name) + 1;
9062 alignlength = ROUND_UP_WORD (length);
9064 ASM_OUTPUT_ASCII (stream, name, length);
9065 ASM_OUTPUT_ALIGN (stream, 2);
9066 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9067 assemble_aligned_integer (UNITS_PER_WORD, x);
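/* A minimal backtrace sketch (illustrative only; the variable names
   are hypothetical): given PC as an unsigned long * holding the
   stacked pc value,
   unsigned long marker = pc[-3];
   if ((marker & 0xff000000) == 0xff000000)
     name = (const char *) (pc - 3) - (marker & ~0xff000000);
   recovers the start of the zero-padded name emitted above. */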
9070 /* Place some comments into the assembler stream
9071 describing the current function. */
9072 static void
9073 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9075 unsigned long func_type;
9077 if (!TARGET_ARM)
9079 thumb_output_function_prologue (f, frame_size);
9080 return;
9083 /* Sanity check. */
9084 if (arm_ccfsm_state || arm_target_insn)
9085 abort ();
9087 func_type = arm_current_func_type ();
9089 switch ((int) ARM_FUNC_TYPE (func_type))
9091 default:
9092 case ARM_FT_NORMAL:
9093 break;
9094 case ARM_FT_INTERWORKED:
9095 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9096 break;
9097 case ARM_FT_EXCEPTION_HANDLER:
9098 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
9099 break;
9100 case ARM_FT_ISR:
9101 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9102 break;
9103 case ARM_FT_FIQ:
9104 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9105 break;
9106 case ARM_FT_EXCEPTION:
9107 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9108 break;
9111 if (IS_NAKED (func_type))
9112 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9114 if (IS_VOLATILE (func_type))
9115 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9117 if (IS_NESTED (func_type))
9118 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9120 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9121 current_function_args_size,
9122 current_function_pretend_args_size, frame_size);
9124 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9125 frame_pointer_needed,
9126 cfun->machine->uses_anonymous_args);
9128 if (cfun->machine->lr_save_eliminated)
9129 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9131 #ifdef AOF_ASSEMBLER
9132 if (flag_pic)
9133 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9134 #endif
9136 return_used_this_function = 0;
9139 const char *
9140 arm_output_epilogue (rtx sibling)
9142 int reg;
9143 unsigned long saved_regs_mask;
9144 unsigned long func_type;
9145 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9146 frame that is $fp + 4 for a non-variadic function. */
9147 int floats_offset = 0;
9148 rtx operands[3];
9149 int frame_size = arm_get_frame_size ();
9150 FILE * f = asm_out_file;
9151 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9152 unsigned int lrm_count = 0;
9153 int really_return = (sibling == NULL);
9154 int start_reg;
9156 /* If we have already generated the return instruction
9157 then it is futile to generate anything else. */
9158 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9159 return "";
9161 func_type = arm_current_func_type ();
9163 if (IS_NAKED (func_type))
9164 /* Naked functions don't have epilogues. */
9165 return "";
9167 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9169 rtx op;
9171 /* A volatile function should never return. Call abort. */
9172 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9173 assemble_external_libcall (op);
9174 output_asm_insn ("bl\t%a0", &op);
9176 return "";
9179 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
9180 && ! really_return)
9181 /* If we are throwing an exception, then we really must
9182 be doing a return, so we can't tail-call. */
9183 abort ();
9185 saved_regs_mask = arm_compute_save_reg_mask ();
9187 if (TARGET_IWMMXT)
9188 lrm_count = bit_count (saved_regs_mask);
9190 /* XXX We should adjust floats_offset for any anonymous args, and then
9191 re-adjust vfp_offset below to compensate. */
9193 /* Compute how far away the floats will be. */
9194 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9195 if (saved_regs_mask & (1 << reg))
9196 floats_offset += 4;
9198 if (frame_pointer_needed)
9200 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9201 int vfp_offset = 4;
9203 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9205 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9206 if (regs_ever_live[reg] && !call_used_regs[reg])
9208 floats_offset += 12;
9209 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9210 reg, FP_REGNUM, floats_offset - vfp_offset);
9213 else
9215 start_reg = LAST_FPA_REGNUM;
9217 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9219 if (regs_ever_live[reg] && !call_used_regs[reg])
9221 floats_offset += 12;
9223 /* We can't unstack more than four registers at once. */
9224 if (start_reg - reg == 3)
9226 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9227 reg, FP_REGNUM, floats_offset - vfp_offset);
9228 start_reg = reg - 1;
9231 else
9233 if (reg != start_reg)
9234 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9235 reg + 1, start_reg - reg,
9236 FP_REGNUM, floats_offset - vfp_offset);
9237 start_reg = reg - 1;
9241 /* Just in case the last register checked also needs unstacking. */
9242 if (reg != start_reg)
9243 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9244 reg + 1, start_reg - reg,
9245 FP_REGNUM, floats_offset - vfp_offset);
9248 if (TARGET_HARD_FLOAT && TARGET_VFP)
9250 int nregs = 0;
9252 /* We save regs in pairs. */
9253 /* A special insn for saving/restoring VFP registers. This does
9254 not have base+offset addressing modes, so we use IP to
9255 hold the address. Each block requires nregs*2+1 words. */
9256 start_reg = FIRST_VFP_REGNUM;
9257 /* Count how many blocks of registers need saving. */
9258 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9260 if ((!regs_ever_live[reg] || call_used_regs[reg])
9261 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9263 if (start_reg != reg)
9264 floats_offset += 4;
9265 start_reg = reg + 2;
9267 else
9269 floats_offset += 8;
9270 nregs++;
9273 if (start_reg != reg)
9274 floats_offset += 4;
9276 if (nregs > 0)
9278 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9279 FP_REGNUM, floats_offset - vfp_offset);
9281 start_reg = FIRST_VFP_REGNUM;
9282 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9284 if ((!regs_ever_live[reg] || call_used_regs[reg])
9285 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9287 if (start_reg != reg)
9289 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9290 (start_reg - FIRST_VFP_REGNUM) / 2,
9291 (reg - start_reg) / 2);
9293 start_reg = reg + 2;
9296 if (start_reg != reg)
9298 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9299 (start_reg - FIRST_VFP_REGNUM) / 2,
9300 (reg - start_reg) / 2);
9304 if (TARGET_IWMMXT)
9306 /* The frame pointer is guaranteed to be non-double-word aligned.
9307 This is because it is set to (old_stack_pointer - 4) and the
9308 old_stack_pointer was double word aligned. Thus the offset to
9309 the iWMMXt registers to be loaded must also be non-double-word
9310 sized, so that the resultant address *is* double-word aligned.
9311 We can ignore floats_offset since that was already included in
9312 the live_regs_mask. */
9313 lrm_count += (lrm_count % 2 ? 2 : 1);
9315 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9316 if (regs_ever_live[reg] && !call_used_regs[reg])
9318 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9319 reg, FP_REGNUM, lrm_count * 4);
9320 lrm_count += 2;
9324 /* saved_regs_mask should contain the IP, which at the time of stack
9325 frame generation actually contains the old stack pointer. So a
9326 quick way to unwind the stack is just pop the IP register directly
9327 into the stack pointer. */
9328 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9329 abort ();
9330 saved_regs_mask &= ~ (1 << IP_REGNUM);
9331 saved_regs_mask |= (1 << SP_REGNUM);
9333 /* There are two registers left in saved_regs_mask - LR and PC. We
9334 only need to restore the LR register (the return address), but to
9335 save time we can load it directly into the PC, unless we need a
9336 special function exit sequence, or we are not really returning. */
9337 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
9338 /* Delete the LR from the register mask, so that the LR on
9339 the stack is loaded into the PC in the register mask. */
9340 saved_regs_mask &= ~ (1 << LR_REGNUM);
9341 else
9342 saved_regs_mask &= ~ (1 << PC_REGNUM);
9344 /* We must use SP as the base register, because SP is one of the
9345 registers being restored. If an interrupt or page fault
9346 happens in the ldm instruction, the SP might or might not
9347 have been restored. That would be bad, as then SP will no
9348 longer indicate the safe area of stack, and we can get stack
9349 corruption. Using SP as the base register means that it will
9350 be reset correctly to the original value, should an interrupt
9351 occur. If the stack pointer already points at the right
9352 place, then omit the subtraction. */
9353 if (((frame_size + current_function_outgoing_args_size + floats_offset)
9354 != 4 * (1 + (int) bit_count (saved_regs_mask)))
9355 || current_function_calls_alloca)
9356 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9357 4 * bit_count (saved_regs_mask));
9358 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9360 if (IS_INTERRUPT (func_type))
9361 /* Interrupt handlers will have pushed the
9362 IP onto the stack, so restore it now. */
9363 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9365 else
9367 /* Restore stack pointer if necessary. */
9368 if (frame_size + current_function_outgoing_args_size != 0)
9370 operands[0] = operands[1] = stack_pointer_rtx;
9371 operands[2] = GEN_INT (frame_size
9372 + current_function_outgoing_args_size);
9373 output_add_immediate (operands);
9376 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9378 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9379 if (regs_ever_live[reg] && !call_used_regs[reg])
9380 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9381 reg, SP_REGNUM);
9383 else
9385 start_reg = FIRST_FPA_REGNUM;
9387 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9389 if (regs_ever_live[reg] && !call_used_regs[reg])
9391 if (reg - start_reg == 3)
9393 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9394 start_reg, SP_REGNUM);
9395 start_reg = reg + 1;
9398 else
9400 if (reg != start_reg)
9401 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9402 start_reg, reg - start_reg,
9403 SP_REGNUM);
9405 start_reg = reg + 1;
9409 /* Just in case the last register checked also needs unstacking. */
9410 if (reg != start_reg)
9411 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9412 start_reg, reg - start_reg, SP_REGNUM);
9415 if (TARGET_HARD_FLOAT && TARGET_VFP)
9417 start_reg = FIRST_VFP_REGNUM;
9418 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9420 if ((!regs_ever_live[reg] || call_used_regs[reg])
9421 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9423 if (start_reg != reg)
9425 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9426 (start_reg - FIRST_VFP_REGNUM) / 2,
9427 (reg - start_reg) / 2);
9429 start_reg = reg + 2;
9432 if (start_reg != reg)
9434 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9435 (start_reg - FIRST_VFP_REGNUM) / 2,
9436 (reg - start_reg) / 2);
9439 if (TARGET_IWMMXT)
9440 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9441 if (regs_ever_live[reg] && !call_used_regs[reg])
9442 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
9444 /* If we can, restore the LR into the PC. */
9445 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9446 && really_return
9447 && current_function_pretend_args_size == 0
9448 && saved_regs_mask & (1 << LR_REGNUM))
9450 saved_regs_mask &= ~ (1 << LR_REGNUM);
9451 saved_regs_mask |= (1 << PC_REGNUM);
9454 /* Load the registers off the stack. If we only have one register
9455 to load use the LDR instruction - it is faster. */
9456 if (saved_regs_mask == (1 << LR_REGNUM))
9458 /* The exception handler ignores the LR, so we do
9459 not really need to load it off the stack. */
9460 if (eh_ofs)
9461 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
9462 else
9463 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9465 else if (saved_regs_mask)
9467 if (saved_regs_mask & (1 << SP_REGNUM))
9468 /* Note - write back to the stack register is not enabled
9469 (ie "ldmfd sp!..."). We know that the stack pointer is
9470 in the list of registers and if we add writeback the
9471 instruction becomes UNPREDICTABLE. */
9472 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9473 else
9474 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9477 if (current_function_pretend_args_size)
9479 /* Unwind the pre-pushed regs. */
9480 operands[0] = operands[1] = stack_pointer_rtx;
9481 operands[2] = GEN_INT (current_function_pretend_args_size);
9482 output_add_immediate (operands);
9486 if (! really_return
9487 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9488 && current_function_pretend_args_size == 0
9489 && saved_regs_mask & (1 << PC_REGNUM)))
9490 return "";
9492 /* Generate the return instruction. */
9493 switch ((int) ARM_FUNC_TYPE (func_type))
9495 case ARM_FT_EXCEPTION_HANDLER:
9496 /* Even in 26-bit mode we do a mov (rather than a movs)
9497 because we don't have the PSR bits set in the address. */
9498 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
9499 break;
9501 case ARM_FT_ISR:
9502 case ARM_FT_FIQ:
9503 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9504 break;
9506 case ARM_FT_EXCEPTION:
9507 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9508 break;
9510 case ARM_FT_INTERWORKED:
9511 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9512 break;
9514 default:
9515 if (frame_pointer_needed)
9516 /* If we used the frame pointer then the return address
9517 will have been loaded off the stack directly into the
9518 PC, so there is no need to issue a MOV instruction
9519 here. */
9520 ;
9521 else if (current_function_pretend_args_size == 0
9522 && (saved_regs_mask & (1 << LR_REGNUM)))
9523 /* Similarly we may have been able to load LR into the PC
9524 even if we did not create a stack frame. */
9525 ;
9526 else if (TARGET_APCS_32)
9527 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9528 else
9529 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9530 break;
9533 return "";
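/* Illustrative example (not from the original source): for a plain
   APCS function with a frame pointer and no floating point saves the
   code above reduces to
   sub sp, fp, #12
   ldmfd sp, {fp, sp, pc}
   which restores the caller's frame pointer, resets sp and returns
   with a single load-multiple. */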
9536 static void
9537 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9538 HOST_WIDE_INT frame_size)
9540 if (TARGET_THUMB)
9542 /* ??? Probably not safe to set this here, since it assumes that a
9543 function will be emitted as assembly immediately after we generate
9544 RTL for it. This does not happen for inline functions. */
9545 return_used_this_function = 0;
9547 else
9549 /* We need to take into account any stack-frame rounding. */
9550 frame_size = arm_get_frame_size ();
9552 if (use_return_insn (FALSE, NULL)
9553 && return_used_this_function
9554 && (frame_size + current_function_outgoing_args_size) != 0
9555 && !frame_pointer_needed)
9556 abort ();
9558 /* Reset the ARM-specific per-function variables. */
9559 after_arm_reorg = 0;
9563 /* Generate and emit an insn that we will recognize as a push_multi.
9564 Unfortunately, since this insn does not reflect very well the actual
9565 semantics of the operation, we need to annotate the insn for the benefit
9566 of DWARF2 frame unwind information. */
9567 static rtx
9568 emit_multi_reg_push (int mask)
9570 int num_regs = 0;
9571 int num_dwarf_regs;
9572 int i, j;
9573 rtx par;
9574 rtx dwarf;
9575 int dwarf_par_index;
9576 rtx tmp, reg;
9578 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9579 if (mask & (1 << i))
9580 num_regs++;
9582 if (num_regs == 0 || num_regs > 16)
9583 abort ();
9585 /* We don't record the PC in the dwarf frame information. */
9586 num_dwarf_regs = num_regs;
9587 if (mask & (1 << PC_REGNUM))
9588 num_dwarf_regs--;
9590 /* For the body of the insn we are going to generate an UNSPEC in
9591 parallel with several USEs. This allows the insn to be recognized
9592 by the push_multi pattern in the arm.md file. The insn looks
9593 something like this:
9595 (parallel [
9596 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9597 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9598 (use (reg:SI 11 fp))
9599 (use (reg:SI 12 ip))
9600 (use (reg:SI 14 lr))
9601 (use (reg:SI 15 pc))
9604 For the frame note however, we try to be more explicit and actually
9605 show each register being stored into the stack frame, plus a (single)
9606 decrement of the stack pointer. We do it this way in order to be
9607 friendly to the stack unwinding code, which only wants to see a single
9608 stack decrement per instruction. The RTL we generate for the note looks
9609 something like this:
9611 (sequence [
9612 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9613 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9614 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9615 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9616 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9619 This sequence is used both by the code to support stack unwinding for
9620 exception handlers and the code to generate dwarf2 frame debugging. */
9622 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9623 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9624 dwarf_par_index = 1;
9626 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9628 if (mask & (1 << i))
9630 reg = gen_rtx_REG (SImode, i);
9632 XVECEXP (par, 0, 0)
9633 = gen_rtx_SET (VOIDmode,
9634 gen_rtx_MEM (BLKmode,
9635 gen_rtx_PRE_DEC (BLKmode,
9636 stack_pointer_rtx)),
9637 gen_rtx_UNSPEC (BLKmode,
9638 gen_rtvec (1, reg),
9639 UNSPEC_PUSH_MULT));
9641 if (i != PC_REGNUM)
9643 tmp = gen_rtx_SET (VOIDmode,
9644 gen_rtx_MEM (SImode, stack_pointer_rtx),
9645 reg);
9646 RTX_FRAME_RELATED_P (tmp) = 1;
9647 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9648 dwarf_par_index++;
9651 break;
9655 for (j = 1, i++; j < num_regs; i++)
9657 if (mask & (1 << i))
9659 reg = gen_rtx_REG (SImode, i);
9661 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9663 if (i != PC_REGNUM)
9665 tmp = gen_rtx_SET (VOIDmode,
9666 gen_rtx_MEM (SImode,
9667 plus_constant (stack_pointer_rtx,
9668 4 * j)),
9669 reg);
9670 RTX_FRAME_RELATED_P (tmp) = 1;
9671 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9674 j++;
9678 par = emit_insn (par);
9680 tmp = gen_rtx_SET (SImode,
9681 stack_pointer_rtx,
9682 gen_rtx_PLUS (SImode,
9683 stack_pointer_rtx,
9684 GEN_INT (-4 * num_regs)));
9685 RTX_FRAME_RELATED_P (tmp) = 1;
9686 XVECEXP (dwarf, 0, 0) = tmp;
9688 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9689 REG_NOTES (par));
9690 return par;
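/* Illustrative usage (not from the original source): a call such as
   emit_multi_reg_push ((1 << 4) | (1 << LR_REGNUM)) emits the insn
   that assembles to "stmfd sp!, {r4, lr}", together with a frame note
   recording the 8-byte stack decrement and both stores. */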
9693 static rtx
9694 emit_sfm (int base_reg, int count)
9696 rtx par;
9697 rtx dwarf;
9698 rtx tmp, reg;
9699 int i;
9701 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9702 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9704 reg = gen_rtx_REG (XFmode, base_reg++);
9706 XVECEXP (par, 0, 0)
9707 = gen_rtx_SET (VOIDmode,
9708 gen_rtx_MEM (BLKmode,
9709 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9710 gen_rtx_UNSPEC (BLKmode,
9711 gen_rtvec (1, reg),
9712 UNSPEC_PUSH_MULT));
9713 tmp
9714 = gen_rtx_SET (VOIDmode,
9715 gen_rtx_MEM (XFmode,
9716 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9717 reg);
9718 RTX_FRAME_RELATED_P (tmp) = 1;
9719 XVECEXP (dwarf, 0, count - 1) = tmp;
9721 for (i = 1; i < count; i++)
9723 reg = gen_rtx_REG (XFmode, base_reg++);
9724 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9726 tmp = gen_rtx_SET (VOIDmode,
9727 gen_rtx_MEM (XFmode,
9728 gen_rtx_PRE_DEC (BLKmode,
9729 stack_pointer_rtx)),
9730 reg);
9731 RTX_FRAME_RELATED_P (tmp) = 1;
9732 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9735 par = emit_insn (par);
9736 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9737 REG_NOTES (par));
9738 return par;
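/* Illustrative note (not from the original source): emit_sfm builds a
   PARALLEL analogous to the one in emit_multi_reg_push for COUNT
   consecutive FPA registers starting at BASE_REG.  The dwarf note
   lists the stores in reverse push order (index count - i - 1), i.e.
   starting from the register that ends up at the lowest, final-sp,
   stack address. */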
9742 /* Compute the distance from register FROM to register TO.
9743 These can be the arg pointer (26), the soft frame pointer (25),
9744 the stack pointer (13) or the hard frame pointer (11).
9745 Typical stack layout looks like this:
9747 old stack pointer -> | |
9748 ----
9749 | | \
9750 | | saved arguments for
9751 | | vararg functions
9752 | | /
9754 hard FP & arg pointer -> | | \
9755 | | stack
9756 | | frame
9757 | | /
9759 | | \
9760 | | call saved
9761 | | registers
9762 soft frame pointer -> | | /
9764 | | \
9765 | | local
9766 | | variables
9767 | | /
9769 | | \
9770 | | outgoing
9771 | | arguments
9772 current stack pointer -> | | /
9775 For a given function some or all of these stack components
9776 may not be needed, giving rise to the possibility of
9777 eliminating some of the registers.
9779 The values returned by this function must reflect the behavior
9780 of arm_expand_prologue() and arm_compute_save_reg_mask().
9782 The sign of the number returned reflects the direction of stack
9783 growth, so the values are positive for all eliminations except
9784 from the soft frame pointer to the hard frame pointer. */
9785 unsigned int
9786 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9788 unsigned int local_vars = arm_get_frame_size ();
9789 unsigned int outgoing_args = current_function_outgoing_args_size;
9790 unsigned int stack_frame;
9791 unsigned int call_saved_registers;
9792 unsigned long func_type;
9794 func_type = arm_current_func_type ();
9796 /* Volatile functions never return, so there is
9797 no need to save call saved registers. */
9798 call_saved_registers = 0;
9799 if (! IS_VOLATILE (func_type))
9801 unsigned int reg_mask;
9802 unsigned int reg;
9803 bool new_block;
9805 /* Make sure that we compute which registers will be saved
9806 on the stack using the same algorithm that is used by
9807 the prologue creation code. */
9808 reg_mask = arm_compute_save_reg_mask ();
9810 /* Now count the number of bits set in save_reg_mask.
9811 If we have already counted the registers in the stack
9812 frame, do not count them again. Non call-saved registers
9813 might be saved in the call-save area of the stack, if
9814 doing so will preserve the stack's alignment. Hence we
9815 must count them here. For each set bit we need 4 bytes
9816 of stack space. */
9817 if (frame_pointer_needed)
9818 reg_mask &= 0x07ff;
9819 call_saved_registers += 4 * bit_count (reg_mask);
9821 /* If the hard floating point registers are going to be
9822 used then they must be saved on the stack as well.
9823 Each register occupies 12 bytes of stack space. */
9824 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9825 if (regs_ever_live[reg] && ! call_used_regs[reg])
9826 call_saved_registers += 12;
9828 /* Likewise VFP regs. */
9829 if (TARGET_HARD_FLOAT && TARGET_VFP)
9831 new_block = TRUE;
9832 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9834 if ((regs_ever_live[reg] && !call_used_regs[reg])
9835 || (regs_ever_live[reg + 1] && !call_used_regs[reg + 1]))
9837 if (new_block)
9839 call_saved_registers += 4;
9840 new_block = FALSE;
9842 call_saved_registers += 8;
9844 else
9845 new_block = TRUE;
9849 if (TARGET_REALLY_IWMMXT)
9850 /* Check for the call-saved iWMMXt registers. */
9851 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9852 if (regs_ever_live[reg] && ! call_used_regs [reg])
9853 call_saved_registers += 8;
9856 /* The stack frame contains 4 registers - the old frame pointer,
9857 the old stack pointer, the return address and PC of the start
9858 of the function. */
9859 stack_frame = frame_pointer_needed ? 16 : 0;
9861 /* OK, now we have enough information to compute the distances.
9862 There must be an entry in these switch tables for each pair
9863 of registers in ELIMINABLE_REGS, even if some of the entries
9864 seem to be redundant or useless. */
9865 switch (from)
9867 case ARG_POINTER_REGNUM:
9868 switch (to)
9870 case THUMB_HARD_FRAME_POINTER_REGNUM:
9871 return 0;
9873 case FRAME_POINTER_REGNUM:
9874 /* This is the reverse of the soft frame pointer
9875 to hard frame pointer elimination below. */
9876 if (call_saved_registers == 0 && stack_frame == 0)
9877 return 0;
9878 return (call_saved_registers + stack_frame - 4);
9880 case ARM_HARD_FRAME_POINTER_REGNUM:
9881 /* If there is no stack frame then the hard
9882 frame pointer and the arg pointer coincide. */
9883 if (stack_frame == 0 && call_saved_registers != 0)
9884 return 0;
9885 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9886 return (frame_pointer_needed
9887 && current_function_needs_context
9888 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9890 case STACK_POINTER_REGNUM:
9891 /* If nothing has been pushed on the stack at all
9892 then this will return -4. This *is* correct! */
9893 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
9895 default:
9896 abort ();
9898 break;
9900 case FRAME_POINTER_REGNUM:
9901 switch (to)
9903 case THUMB_HARD_FRAME_POINTER_REGNUM:
9904 return 0;
9906 case ARM_HARD_FRAME_POINTER_REGNUM:
9907 /* The hard frame pointer points to the top entry in the
9908 stack frame. The soft frame pointer to the bottom entry
9909 in the stack frame. If there is no stack frame at all,
9910 then they are identical. */
9911 if (call_saved_registers == 0 && stack_frame == 0)
9912 return 0;
9913 return - (call_saved_registers + stack_frame - 4);
9915 case STACK_POINTER_REGNUM:
9916 return local_vars + outgoing_args;
9918 default:
9919 abort ();
9921 break;
9923 default:
9924 /* You cannot eliminate from the stack pointer.
9925 In theory you could eliminate from the hard frame
9926 pointer to the stack pointer, but this will never
9927 happen, since if a stack frame is not needed the
9928 hard frame pointer will never be used. */
9929 abort ();
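/* Worked example (illustrative): a frameless function that saves r4
   and LR (8 bytes of call-saved registers), has 12 bytes of locals
   and no outgoing arguments gives the ARG_POINTER -> STACK_POINTER
   elimination an offset of 8 + 0 + 12 + 0 - 4 == 16. */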
9933 /* Calculate the size of the stack frame, taking into account any
9934 padding that is required to ensure stack-alignment. */
9935 HOST_WIDE_INT
9936 arm_get_frame_size (void)
9938 int regno;
9940 int base_size = ROUND_UP_WORD (get_frame_size ());
9941 int entry_size = 0;
9942 unsigned long func_type = arm_current_func_type ();
9943 int leaf;
9944 bool new_block;
9946 if (! TARGET_ARM)
9947 abort();
9949 if (! TARGET_ATPCS)
9950 return base_size;
9952 /* We need to know if we are a leaf function. Unfortunately, it
9953 is possible to be called after start_sequence has been called,
9954 which causes get_insns to return the insns for the sequence,
9955 not the function, which will cause leaf_function_p to return
9956 the incorrect result.
9958 To work around this, we cache the computed frame size. This
9959 works because we will only be calling RTL expanders that need
9960 to know about leaf functions once reload has completed, and the
9961 frame size cannot be changed after that time, so we can safely
9962 use the cached value. */
9964 if (reload_completed)
9965 return cfun->machine->frame_size;
9967 leaf = leaf_function_p ();
9969 /* A leaf function does not need any stack alignment if it has nothing
9970 on the stack. */
9971 if (leaf && base_size == 0)
9973 cfun->machine->frame_size = 0;
9974 return 0;
9977 /* We know that SP will be word aligned on entry, and we must
9978 preserve that condition at any subroutine call. But those are
9979 the only constraints. */
9981 /* Space for variadic functions. */
9982 if (current_function_pretend_args_size)
9983 entry_size += current_function_pretend_args_size;
9985 /* Space for saved registers. */
9986 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
9988 if (! IS_VOLATILE (func_type))
9990 /* Space for saved FPA registers. */
9991 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9992 if (regs_ever_live[regno] && ! call_used_regs[regno])
9993 entry_size += 12;
9995 /* Space for saved VFP registers. */
9996 if (TARGET_HARD_FLOAT && TARGET_VFP)
9998 new_block = TRUE;
9999 for (regno = FIRST_VFP_REGNUM; regno < LAST_VFP_REGNUM; regno += 2)
10001 if ((regs_ever_live[regno] && !call_used_regs[regno])
10002 || (regs_ever_live[regno + 1] && !call_used_regs[regno + 1]))
10004 if (new_block)
10006 entry_size += 4;
10007 new_block = FALSE;
10009 entry_size += 8;
10011 else
10012 new_block = TRUE;
10017 if (TARGET_REALLY_IWMMXT)
10019 /* Check for the call-saved iWMMXt registers. */
10020 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
10021 if (regs_ever_live [regno] && ! call_used_regs [regno])
10022 entry_size += 8;
10025 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10026 base_size += 4;
10027 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10028 abort ();
10030 cfun->machine->frame_size = base_size;
10032 return base_size;
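/* Worked example (illustrative): under ATPCS, entry_size = 20 (four
   saved registers plus 4 bytes of pretend args) with base_size = 8
   and no outgoing arguments sums to 28, which is not a multiple of 8,
   so base_size is bumped to 12 and cached as the frame size. */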
10035 /* Generate the prologue instructions for entry into an ARM function. */
10036 void
10037 arm_expand_prologue (void)
10039 int reg;
10040 rtx amount;
10041 rtx insn;
10042 rtx ip_rtx;
10043 unsigned long live_regs_mask;
10044 unsigned long func_type;
10045 int fp_offset = 0;
10046 int saved_pretend_args = 0;
10047 unsigned int args_to_push;
10049 func_type = arm_current_func_type ();
10051 /* Naked functions don't have prologues. */
10052 if (IS_NAKED (func_type))
10053 return;
10055 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10056 args_to_push = current_function_pretend_args_size;
10058 /* Compute which register we will have to save onto the stack. */
10059 live_regs_mask = arm_compute_save_reg_mask ();
10061 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10063 if (frame_pointer_needed)
10065 if (IS_INTERRUPT (func_type))
10067 /* Interrupt functions must not corrupt any registers.
10068 Creating a frame pointer, however, corrupts the IP
10069 register, so we must push it first. */
10070 insn = emit_multi_reg_push (1 << IP_REGNUM);
10072 /* Do not set RTX_FRAME_RELATED_P on this insn.
10073 The dwarf stack unwinding code only wants to see one
10074 stack decrement per function, and this is not it. If
10075 this instruction is labeled as being part of the frame
10076 creation sequence then dwarf2out_frame_debug_expr will
10077 abort when it encounters the assignment of IP to FP
10078 later on, since the use of SP here establishes SP as
10079 the CFA register and not IP.
10081 Anyway this instruction is not really part of the stack
10082 frame creation although it is part of the prologue. */
10084 else if (IS_NESTED (func_type))
10086 /* The static chain register is the same as the IP register
10087 used as a scratch register during stack frame creation.
10088 To get around this we need to find somewhere to store IP
10089 whilst the frame is being created. We try the following
10090 places in order:
10092 1. The last argument register.
10093 2. A slot on the stack above the frame. (This only
10094 works if the function is not a varargs function).
10095 3. Register r3, after pushing the argument registers
10096 onto the stack.
10098 Note - we only need to tell the dwarf2 backend about the SP
10099 adjustment in the second variant; the static chain register
10100 doesn't need to be unwound, as it doesn't contain a value
10101 inherited from the caller. */
10103 if (regs_ever_live[3] == 0)
10105 insn = gen_rtx_REG (SImode, 3);
10106 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10107 insn = emit_insn (insn);
10109 else if (args_to_push == 0)
10111 rtx dwarf;
10112 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10113 insn = gen_rtx_MEM (SImode, insn);
10114 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10115 insn = emit_insn (insn);
10117 fp_offset = 4;
10119 /* Just tell the dwarf backend that we adjusted SP. */
10120 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10121 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10122 GEN_INT (-fp_offset)));
10123 RTX_FRAME_RELATED_P (insn) = 1;
10124 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10125 dwarf, REG_NOTES (insn));
10127 else
10129 /* Store the args on the stack. */
10130 if (cfun->machine->uses_anonymous_args)
10131 insn = emit_multi_reg_push
10132 ((0xf0 >> (args_to_push / 4)) & 0xf);
10133 else
10134 insn = emit_insn
10135 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10136 GEN_INT (- args_to_push)));
10138 RTX_FRAME_RELATED_P (insn) = 1;
10140 saved_pretend_args = 1;
10141 fp_offset = args_to_push;
10142 args_to_push = 0;
10144 /* Now reuse r3 to preserve IP. */
10145 insn = gen_rtx_REG (SImode, 3);
10146 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10147 (void) emit_insn (insn);
10151 if (fp_offset)
10153 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10154 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10156 else
10157 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10159 insn = emit_insn (insn);
10160 RTX_FRAME_RELATED_P (insn) = 1;
10163 if (args_to_push)
10165 /* Push the argument registers, or reserve space for them. */
10166 if (cfun->machine->uses_anonymous_args)
10167 insn = emit_multi_reg_push
10168 ((0xf0 >> (args_to_push / 4)) & 0xf);
10169 else
10170 insn = emit_insn
10171 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10172 GEN_INT (- args_to_push)));
10173 RTX_FRAME_RELATED_P (insn) = 1;
10176 /* If this is an interrupt service routine, and the link register
10177 is going to be pushed, and we are not creating a stack frame
10178 (which would involve an extra push of IP and a pop in the epilogue),
10179 subtracting four from LR now will mean that the function return
10180 can be done with a single instruction. */
10181 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10182 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10183 && ! frame_pointer_needed)
10184 emit_insn (gen_rtx_SET (SImode,
10185 gen_rtx_REG (SImode, LR_REGNUM),
10186 gen_rtx_PLUS (SImode,
10187 gen_rtx_REG (SImode, LR_REGNUM),
10188 GEN_INT (-4))));
10190 if (live_regs_mask)
10192 insn = emit_multi_reg_push (live_regs_mask);
10193 RTX_FRAME_RELATED_P (insn) = 1;
10196 if (TARGET_IWMMXT)
10197 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10198 if (regs_ever_live[reg] && ! call_used_regs [reg])
10200 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10201 insn = gen_rtx_MEM (V2SImode, insn);
10202 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10203 gen_rtx_REG (V2SImode, reg)));
10204 RTX_FRAME_RELATED_P (insn) = 1;
10207 if (! IS_VOLATILE (func_type))
10209 int start_reg;
10211 /* Save any floating point call-saved registers used by this
10212 function. */
10213 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10215 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10216 if (regs_ever_live[reg] && !call_used_regs[reg])
10218 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10219 insn = gen_rtx_MEM (XFmode, insn);
10220 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10221 gen_rtx_REG (XFmode, reg)));
10222 RTX_FRAME_RELATED_P (insn) = 1;
10225 else
10227 start_reg = LAST_FPA_REGNUM;
10229 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10231 if (regs_ever_live[reg] && !call_used_regs[reg])
10233 if (start_reg - reg == 3)
10235 insn = emit_sfm (reg, 4);
10236 RTX_FRAME_RELATED_P (insn) = 1;
10237 start_reg = reg - 1;
10240 else
10242 if (start_reg != reg)
10244 insn = emit_sfm (reg + 1, start_reg - reg);
10245 RTX_FRAME_RELATED_P (insn) = 1;
10247 start_reg = reg - 1;
10251 if (start_reg != reg)
10253 insn = emit_sfm (reg + 1, start_reg - reg);
10254 RTX_FRAME_RELATED_P (insn) = 1;
10257 if (TARGET_HARD_FLOAT && TARGET_VFP)
10259 start_reg = FIRST_VFP_REGNUM;
10261 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10263 if ((!regs_ever_live[reg] || call_used_regs[reg])
10264 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10266 if (start_reg != reg)
10268 insn = vfp_emit_fstmx (start_reg,
10269 (reg - start_reg) / 2);
10270 RTX_FRAME_RELATED_P (insn) = 1;
10272 start_reg = reg + 2;
10275 if (start_reg != reg)
10277 insn = vfp_emit_fstmx (start_reg,
10278 (reg - start_reg) / 2);
10279 RTX_FRAME_RELATED_P (insn) = 1;
10284 if (frame_pointer_needed)
10286 /* Create the new frame pointer. */
10287 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10288 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10289 RTX_FRAME_RELATED_P (insn) = 1;
10291 if (IS_NESTED (func_type))
10293 /* Recover the static chain register. */
10294 if (regs_ever_live [3] == 0
10295 || saved_pretend_args)
10296 insn = gen_rtx_REG (SImode, 3);
10297 else /* if (current_function_pretend_args_size == 0) */
10299 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10300 GEN_INT (4));
10301 insn = gen_rtx_MEM (SImode, insn);
10304 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10305 /* Add a USE to stop propagate_one_insn() from barfing. */
10306 emit_insn (gen_prologue_use (ip_rtx));
10310 amount = GEN_INT (-(arm_get_frame_size ()
10311 + current_function_outgoing_args_size));
10313 if (amount != const0_rtx)
10315 /* This add can produce multiple insns for a large constant, so we
10316 need to get tricky. */
10317 rtx last = get_last_insn ();
10318 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10319 amount));
10320 do
10322 last = last ? NEXT_INSN (last) : get_insns ();
10323 RTX_FRAME_RELATED_P (last) = 1;
10325 while (last != insn);
10327 /* If the frame pointer is needed, emit a special barrier that
10328 will prevent the scheduler from moving stores to the frame
10329 before the stack adjustment. */
10330 if (frame_pointer_needed)
10331 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10332 hard_frame_pointer_rtx));
10335 /* If we are profiling, make sure no instructions are scheduled before
10336 the call to mcount. Similarly if the user has requested no
10337 scheduling in the prologue. */
10338 if (current_function_profile || TARGET_NO_SCHED_PRO)
10339 emit_insn (gen_blockage ());
10341 /* If the link register is being kept alive, with the return address in it,
10342 then make sure that it does not get reused by the ce2 pass. */
10343 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10345 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10346 cfun->machine->lr_save_eliminated = 1;
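/* The mask expression (0xf0 >> (args_to_push / 4)) & 0xf used twice in the
   prologue above selects the highest-numbered argument registers for the
   varargs push: each 4 bytes of pretend args claims one more of r0-r3,
   counting down from r3.  A minimal sketch with worked values (illustrative
   only; compiled out):  */
#if 0
static unsigned int
anon_arg_push_mask (unsigned int args_to_push)
{
  /* args_to_push ==  4  ->  0x8  (push r3)
     args_to_push ==  8  ->  0xc  (push r2, r3)
     args_to_push == 12  ->  0xe  (push r1, r2, r3)
     args_to_push == 16  ->  0xf  (push r0, r1, r2, r3)  */
  return (0xf0 >> (args_to_push / 4)) & 0xf;
}
#endif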
10350 /* If CODE is 'd', then X is a condition operand and the instruction
10351 should only be executed if the condition is true.
10352 If CODE is 'D', then X is a condition operand and the instruction
10353 should only be executed if the condition is false: however, if the mode
10354 of the comparison is CCFPEmode, then always execute the instruction -- we
10355 do this because in these circumstances !GE does not necessarily imply LT;
10356 in these cases the instruction pattern will take care to make sure that
10357 an instruction containing %d will follow, thereby undoing the effects of
10358 doing this instruction unconditionally.
10359 If CODE is 'N' then X is a floating point operand that must be negated
10360 before output.
10361 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10362 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
10363 void
10364 arm_print_operand (FILE *stream, rtx x, int code)
10366 switch (code)
10368 case '@':
10369 fputs (ASM_COMMENT_START, stream);
10370 return;
10372 case '_':
10373 fputs (user_label_prefix, stream);
10374 return;
10376 case '|':
10377 fputs (REGISTER_PREFIX, stream);
10378 return;
10380 case '?':
10381 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10383 if (TARGET_THUMB || current_insn_predicate != NULL)
10384 abort ();
10386 fputs (arm_condition_codes[arm_current_cc], stream);
10388 else if (current_insn_predicate)
10390 enum arm_cond_code code;
10392 if (TARGET_THUMB)
10393 abort ();
10395 code = get_arm_condition_code (current_insn_predicate);
10396 fputs (arm_condition_codes[code], stream);
10398 return;
10400 case 'N':
10402 REAL_VALUE_TYPE r;
10403 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10404 r = REAL_VALUE_NEGATE (r);
10405 fprintf (stream, "%s", fp_const_from_val (&r));
10407 return;
10409 case 'B':
10410 if (GET_CODE (x) == CONST_INT)
10412 HOST_WIDE_INT val;
10413 val = ARM_SIGN_EXTEND (~INTVAL (x));
10414 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10416 else
10418 putc ('~', stream);
10419 output_addr_const (stream, x);
10421 return;
10423 case 'i':
10424 fprintf (stream, "%s", arithmetic_instr (x, 1));
10425 return;
10427 /* Truncate Cirrus shift counts. */
10428 case 's':
10429 if (GET_CODE (x) == CONST_INT)
10431 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10432 return;
10434 arm_print_operand (stream, x, 0);
10435 return;
10437 case 'I':
10438 fprintf (stream, "%s", arithmetic_instr (x, 0));
10439 return;
10441 case 'S':
10443 HOST_WIDE_INT val;
10444 const char * shift = shift_op (x, &val);
10446 if (shift)
10448 fprintf (stream, ", %s ", shift_op (x, &val));
10449 if (val == -1)
10450 arm_print_operand (stream, XEXP (x, 1), 0);
10451 else
10452 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10455 return;
10457 /* An explanation of the 'Q', 'R' and 'H' register operands:
10459 In a pair of registers containing a DI or DF value the 'Q'
10460 operand returns the register number of the register containing
10461 the least significant part of the value. The 'R' operand returns
10462 the register number of the register containing the most
10463 significant part of the value.
10465 The 'H' operand returns the higher of the two register numbers.
10466 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10467 same as the 'Q' operand, since the most significant part of the
10468 value is held in the lower number register. The reverse is true
10469 on systems where WORDS_BIG_ENDIAN is false.
10471 The purpose of these operands is to distinguish between cases
10472 where the endian-ness of the values is important (for example
10473 when they are added together), and cases where the endian-ness
10474 is irrelevant, but the order of register operations is important.
10475 For example when loading a value from memory into a register
10476 pair, the endian-ness does not matter. Provided that the value
10477 from the lower memory address is put into the lower numbered
10478 register, and the value from the higher address is put into the
10479 higher numbered register, the load will work regardless of whether
10480 the value being loaded is big-wordian or little-wordian. The
10481 order of the two register loads can matter however, if the address
10482 of the memory location is actually held in one of the registers
10483 being overwritten by the load. */
10484 case 'Q':
10485 if (REGNO (x) > LAST_ARM_REGNUM)
10486 abort ();
10487 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10488 return;
10490 case 'R':
10491 if (REGNO (x) > LAST_ARM_REGNUM)
10492 abort ();
10493 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10494 return;
10496 case 'H':
10497 if (REGNO (x) > LAST_ARM_REGNUM)
10498 abort ();
10499 asm_fprintf (stream, "%r", REGNO (x) + 1);
10500 return;
10502 case 'm':
10503 asm_fprintf (stream, "%r",
10504 GET_CODE (XEXP (x, 0)) == REG
10505 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10506 return;
10508 case 'M':
10509 asm_fprintf (stream, "{%r-%r}",
10510 REGNO (x),
10511 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10512 return;
10514 case 'd':
10515 /* CONST_TRUE_RTX means always -- that's the default. */
10516 if (x == const_true_rtx)
10517 return;
10519 fputs (arm_condition_codes[get_arm_condition_code (x)],
10520 stream);
10521 return;
10523 case 'D':
10524 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10525 want to do that. */
10526 if (x == const_true_rtx)
10527 abort ();
10529 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10530 (get_arm_condition_code (x))],
10531 stream);
10532 return;
10534 /* Cirrus registers can be accessed in a variety of ways:
10535 single floating point (f)
10536 double floating point (d)
10537 32-bit integer (fx)
10538 64-bit integer (dx). */
10539 case 'W': /* Cirrus register in F mode. */
10540 case 'X': /* Cirrus register in D mode. */
10541 case 'Y': /* Cirrus register in FX mode. */
10542 case 'Z': /* Cirrus register in DX mode. */
10543 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10544 abort ();
10546 fprintf (stream, "mv%s%s",
10547 code == 'W' ? "f"
10548 : code == 'X' ? "d"
10549 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10551 return;
10553 /* Print a Cirrus register using the mode of the register. */
10554 case 'V':
10556 int mode = GET_MODE (x);
10558 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10559 abort ();
10561 fprintf (stream, "mv%s%s",
10562 mode == DFmode ? "d"
10563 : mode == SImode ? "fx"
10564 : mode == DImode ? "dx"
10565 : "f", reg_names[REGNO (x)] + 2);
10567 return;
10570 case 'U':
10571 if (GET_CODE (x) != REG
10572 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10573 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10574 /* Bad value for wCG register number. */
10575 abort ();
10576 else
10577 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10578 return;
10580 /* Print an iWMMXt control register name. */
10581 case 'w':
10582 if (GET_CODE (x) != CONST_INT
10583 || INTVAL (x) < 0
10584 || INTVAL (x) >= 16)
10585 /* Bad value for wC register number. */
10586 abort ();
10587 else
10589 static const char * wc_reg_names [16] =
10591 "wCID", "wCon", "wCSSF", "wCASF",
10592 "wC4", "wC5", "wC6", "wC7",
10593 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10594 "wC12", "wC13", "wC14", "wC15"
10597 fprintf (stream, wc_reg_names [INTVAL (x)]);
10599 return;
10601 /* Print a VFP double precision register name. */
10602 case 'P':
10604 int mode = GET_MODE (x);
10605 int num;
10607 if (mode != DImode && mode != DFmode)
10608 abort ();
10610 if (GET_CODE (x) != REG
10611 || !IS_VFP_REGNUM (REGNO (x)))
10612 abort ();
10614 num = REGNO(x) - FIRST_VFP_REGNUM;
10615 if (num & 1)
10616 abort ();
10618 fprintf (stream, "d%d", num >> 1);
10620 return;
10622 default:
10623 if (x == 0)
10624 abort ();
10626 if (GET_CODE (x) == REG)
10627 asm_fprintf (stream, "%r", REGNO (x));
10628 else if (GET_CODE (x) == MEM)
10630 output_memory_reference_mode = GET_MODE (x);
10631 output_address (XEXP (x, 0));
10633 else if (GET_CODE (x) == CONST_DOUBLE)
10634 fprintf (stream, "#%s", fp_immediate_constant (x));
10635 else if (GET_CODE (x) == NEG)
10636 abort (); /* This should never happen now. */
10637 else
10639 fputc ('#', stream);
10640 output_addr_const (stream, x);
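/* For the 'Q', 'R' and 'H' operand codes handled above, the placement of the
   two words of a DImode or DFmode value depends on WORDS_BIG_ENDIAN: 'Q'
   always names the register holding the least significant word and 'R' the
   most significant one.  A minimal sketch of the selection (illustrative
   only; compiled out):  */
#if 0
static unsigned int
lsw_regno (unsigned int regno, int words_big_endian)
{
  /* Little-wordian: the low word lives in REGNO itself; big-wordian:
     in REGNO + 1 -- exactly what the '%Q' case computes.  */
  return regno + (words_big_endian ? 1 : 0);
}
#endif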
10645 #ifndef AOF_ASSEMBLER
10646 /* Target hook for assembling integer objects. The ARM version needs to
10647 handle word-sized values specially. */
10648 static bool
10649 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10651 if (size == UNITS_PER_WORD && aligned_p)
10653 fputs ("\t.word\t", asm_out_file);
10654 output_addr_const (asm_out_file, x);
10656 /* Mark symbols as position independent. We only do this in the
10657 .text segment, not in the .data segment. */
10658 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10659 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10661 if (GET_CODE (x) == SYMBOL_REF
10662 && (CONSTANT_POOL_ADDRESS_P (x)
10663 || SYMBOL_REF_LOCAL_P (x)))
10664 fputs ("(GOTOFF)", asm_out_file);
10665 else if (GET_CODE (x) == LABEL_REF)
10666 fputs ("(GOTOFF)", asm_out_file);
10667 else
10668 fputs ("(GOT)", asm_out_file);
10670 fputc ('\n', asm_out_file);
10671 return true;
10674 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
10676 int i, units;
10678 if (GET_CODE (x) != CONST_VECTOR)
10679 abort ();
10681 units = CONST_VECTOR_NUNITS (x);
10683 switch (GET_MODE (x))
10685 case V2SImode: size = 4; break;
10686 case V4HImode: size = 2; break;
10687 case V8QImode: size = 1; break;
10688 default:
10689 abort ();
10692 for (i = 0; i < units; i++)
10694 rtx elt;
10696 elt = CONST_VECTOR_ELT (x, i);
10697 assemble_integer
10698 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10701 return true;
10704 return default_assemble_integer (x, size, aligned_p);
10706 #endif
10708 /* A finite state machine takes care of noticing whether or not instructions
10709 can be conditionally executed, and thus decreases execution time and code
10710 size by deleting branch instructions. The fsm is controlled by
10711 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10713 /* The states of the fsm controlling condition codes are:
10714 0: normal, do nothing special
10715 1: make ASM_OUTPUT_OPCODE not output this instruction
10716 2: make ASM_OUTPUT_OPCODE not output this instruction
10717 3: make instructions conditional
10718 4: make instructions conditional
10720 State transitions (state->state by whom under condition):
10721 0 -> 1 final_prescan_insn if the `target' is a label
10722 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10723 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10724 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10725 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10726 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10727 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10728 (the target insn is arm_target_insn).
10730 If the jump clobbers the conditions then we use states 2 and 4.
10732 A similar thing can be done with conditional return insns.
10734 XXX In case the `target' is an unconditional branch, this conditionalising
10735 of the instructions always reduces code size, but not always execution
10736 time. But then, I want to reduce the code size to somewhere near what
10737 /bin/cc produces. */
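/* The port keeps this state in the plain int arm_ccfsm_state; an enum
   equivalent of the five states listed above (illustrative only, not part of
   the port) makes the transition table easier to read:  */
#if 0
enum ccfsm_state
{
  CCFSM_NORMAL = 0,      /* Do nothing special.  */
  CCFSM_SUPPRESS_1 = 1,  /* Make ASM_OUTPUT_OPCODE not output the branch.  */
  CCFSM_SUPPRESS_2 = 2,  /* Likewise, when the jump clobbers the condition
                            codes.  */
  CCFSM_COND_3 = 3,      /* Make the following insns conditional.  */
  CCFSM_COND_4 = 4       /* Likewise, for the clobbering variant.  */
};
#endif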
10739 /* Returns the index of the ARM condition code string in
10740 `arm_condition_codes'. COMPARISON should be an rtx like
10741 `(eq (...) (...))'. */
10742 static enum arm_cond_code
10743 get_arm_condition_code (rtx comparison)
10745 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10746 int code;
10747 enum rtx_code comp_code = GET_CODE (comparison);
10749 if (GET_MODE_CLASS (mode) != MODE_CC)
10750 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10751 XEXP (comparison, 1));
10753 switch (mode)
10755 case CC_DNEmode: code = ARM_NE; goto dominance;
10756 case CC_DEQmode: code = ARM_EQ; goto dominance;
10757 case CC_DGEmode: code = ARM_GE; goto dominance;
10758 case CC_DGTmode: code = ARM_GT; goto dominance;
10759 case CC_DLEmode: code = ARM_LE; goto dominance;
10760 case CC_DLTmode: code = ARM_LT; goto dominance;
10761 case CC_DGEUmode: code = ARM_CS; goto dominance;
10762 case CC_DGTUmode: code = ARM_HI; goto dominance;
10763 case CC_DLEUmode: code = ARM_LS; goto dominance;
10764 case CC_DLTUmode: code = ARM_CC;
10766 dominance:
10767 if (comp_code != EQ && comp_code != NE)
10768 abort ();
10770 if (comp_code == EQ)
10771 return ARM_INVERSE_CONDITION_CODE (code);
10772 return code;
10774 case CC_NOOVmode:
10775 switch (comp_code)
10777 case NE: return ARM_NE;
10778 case EQ: return ARM_EQ;
10779 case GE: return ARM_PL;
10780 case LT: return ARM_MI;
10781 default: abort ();
10784 case CC_Zmode:
10785 switch (comp_code)
10787 case NE: return ARM_NE;
10788 case EQ: return ARM_EQ;
10789 default: abort ();
10792 case CC_Nmode:
10793 switch (comp_code)
10795 case NE: return ARM_MI;
10796 case EQ: return ARM_PL;
10797 default: abort ();
10800 case CCFPEmode:
10801 case CCFPmode:
10802 /* These encodings assume that AC=1 in the FPA system control
10803 byte. This allows us to handle all cases except UNEQ and
10804 LTGT. */
10805 switch (comp_code)
10807 case GE: return ARM_GE;
10808 case GT: return ARM_GT;
10809 case LE: return ARM_LS;
10810 case LT: return ARM_MI;
10811 case NE: return ARM_NE;
10812 case EQ: return ARM_EQ;
10813 case ORDERED: return ARM_VC;
10814 case UNORDERED: return ARM_VS;
10815 case UNLT: return ARM_LT;
10816 case UNLE: return ARM_LE;
10817 case UNGT: return ARM_HI;
10818 case UNGE: return ARM_PL;
10819 /* UNEQ and LTGT do not have a representation. */
10820 case UNEQ: /* Fall through. */
10821 case LTGT: /* Fall through. */
10822 default: abort ();
10825 case CC_SWPmode:
10826 switch (comp_code)
10828 case NE: return ARM_NE;
10829 case EQ: return ARM_EQ;
10830 case GE: return ARM_LE;
10831 case GT: return ARM_LT;
10832 case LE: return ARM_GE;
10833 case LT: return ARM_GT;
10834 case GEU: return ARM_LS;
10835 case GTU: return ARM_CC;
10836 case LEU: return ARM_CS;
10837 case LTU: return ARM_HI;
10838 default: abort ();
10841 case CC_Cmode:
10842 switch (comp_code)
10844 case LTU: return ARM_CS;
10845 case GEU: return ARM_CC;
10846 default: abort ();
10849 case CCmode:
10850 switch (comp_code)
10852 case NE: return ARM_NE;
10853 case EQ: return ARM_EQ;
10854 case GE: return ARM_GE;
10855 case GT: return ARM_GT;
10856 case LE: return ARM_LE;
10857 case LT: return ARM_LT;
10858 case GEU: return ARM_CS;
10859 case GTU: return ARM_HI;
10860 case LEU: return ARM_LS;
10861 case LTU: return ARM_CC;
10862 default: abort ();
10865 default: abort ();
10868 abort ();
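/* The dominance cases above return ARM_INVERSE_CONDITION_CODE (code) for EQ.
   The inversion is cheap because the arm condition codes are laid out in
   complementary pairs (EQ/NE, CS/CC, MI/PL, ...), so the inverse of a code is
   obtained by flipping its low bit.  A sketch of the idea (illustrative only;
   the real macro lives in the ARM headers, and the pairwise layout is an
   assumption stated here, not established by this file):  */
#if 0
#define SKETCH_INVERSE_COND(x) ((x) ^ 1)  /* e.g. EQ ^ 1 == NE.  */
#endif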
10871 void
10872 arm_final_prescan_insn (rtx insn)
10874 /* BODY will hold the body of INSN. */
10875 rtx body = PATTERN (insn);
10877 /* This will be 1 if we are trying to repeat the trick, and things need
10878 to be reversed if it appears to fail. */
10879 int reverse = 0;
10881 /* If JUMP_CLOBBERS is one, then the condition codes are clobbered when
10882 the branch is taken, even if the rtl suggests otherwise. It also
10883 means that we have to grub around within the jump expression to find
10884 out what the conditions are when the jump isn't taken. */
10885 int jump_clobbers = 0;
10887 /* If we start with a return insn, we only succeed if we find another one. */
10888 int seeking_return = 0;
10890 /* START_INSN will hold the insn from where we start looking. This is the
10891 first insn after the following code_label if REVERSE is true. */
10892 rtx start_insn = insn;
10894 /* If in state 4, check if the target branch is reached, in order to
10895 change back to state 0. */
10896 if (arm_ccfsm_state == 4)
10898 if (insn == arm_target_insn)
10900 arm_target_insn = NULL;
10901 arm_ccfsm_state = 0;
10903 return;
10906 /* If in state 3, it is possible to repeat the trick, if this insn is an
10907 unconditional branch to a label, and immediately following this branch
10908 is the previous target label which is only used once, and the label this
10909 branch jumps to is not too far off. */
10910 if (arm_ccfsm_state == 3)
10912 if (simplejump_p (insn))
10914 start_insn = next_nonnote_insn (start_insn);
10915 if (GET_CODE (start_insn) == BARRIER)
10917 /* XXX Isn't this always a barrier? */
10918 start_insn = next_nonnote_insn (start_insn);
10920 if (GET_CODE (start_insn) == CODE_LABEL
10921 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10922 && LABEL_NUSES (start_insn) == 1)
10923 reverse = TRUE;
10924 else
10925 return;
10927 else if (GET_CODE (body) == RETURN)
10929 start_insn = next_nonnote_insn (start_insn);
10930 if (GET_CODE (start_insn) == BARRIER)
10931 start_insn = next_nonnote_insn (start_insn);
10932 if (GET_CODE (start_insn) == CODE_LABEL
10933 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10934 && LABEL_NUSES (start_insn) == 1)
10936 reverse = TRUE;
10937 seeking_return = 1;
10939 else
10940 return;
10942 else
10943 return;
10946 if (arm_ccfsm_state != 0 && !reverse)
10947 abort ();
10948 if (GET_CODE (insn) != JUMP_INSN)
10949 return;
10951 /* This jump might be paralleled with a clobber of the condition codes;
10952 the jump should always come first. */
10953 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10954 body = XVECEXP (body, 0, 0);
10956 if (reverse
10957 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10958 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10960 int insns_skipped;
10961 int fail = FALSE, succeed = FALSE;
10962 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10963 int then_not_else = TRUE;
10964 rtx this_insn = start_insn, label = 0;
10966 /* If the jump cannot be done with one instruction, we cannot
10967 conditionally execute the instruction in the inverse case. */
10968 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10970 jump_clobbers = 1;
10971 return;
10974 /* Register the insn jumped to. */
10975 if (reverse)
10977 if (!seeking_return)
10978 label = XEXP (SET_SRC (body), 0);
10980 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10981 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10982 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10984 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10985 then_not_else = FALSE;
10987 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10988 seeking_return = 1;
10989 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10991 seeking_return = 1;
10992 then_not_else = FALSE;
10994 else
10995 abort ();
10997 /* See how many insns this branch skips, and what kind of insns. If all
10998 insns are okay, and the label or unconditional branch to the same
10999 label is not too far away, succeed. */
11000 for (insns_skipped = 0;
11001 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
11003 rtx scanbody;
11005 this_insn = next_nonnote_insn (this_insn);
11006 if (!this_insn)
11007 break;
11009 switch (GET_CODE (this_insn))
11011 case CODE_LABEL:
11012 /* Succeed if it is the target label, otherwise fail since
11013 control falls in from somewhere else. */
11014 if (this_insn == label)
11016 if (jump_clobbers)
11018 arm_ccfsm_state = 2;
11019 this_insn = next_nonnote_insn (this_insn);
11021 else
11022 arm_ccfsm_state = 1;
11023 succeed = TRUE;
11025 else
11026 fail = TRUE;
11027 break;
11029 case BARRIER:
11030 /* Succeed if the following insn is the target label.
11031 Otherwise fail.
11032 If return insns are used then the last insn in a function
11033 will be a barrier. */
11034 this_insn = next_nonnote_insn (this_insn);
11035 if (this_insn && this_insn == label)
11037 if (jump_clobbers)
11039 arm_ccfsm_state = 2;
11040 this_insn = next_nonnote_insn (this_insn);
11042 else
11043 arm_ccfsm_state = 1;
11044 succeed = TRUE;
11046 else
11047 fail = TRUE;
11048 break;
11050 case CALL_INSN:
11051 /* If using 32-bit addresses, the cc is not preserved over
11052 calls. */
11053 if (TARGET_APCS_32)
11055 /* Succeed if the following insn is the target label,
11056 or if the following two insns are a barrier and
11057 the target label. */
11058 this_insn = next_nonnote_insn (this_insn);
11059 if (this_insn && GET_CODE (this_insn) == BARRIER)
11060 this_insn = next_nonnote_insn (this_insn);
11062 if (this_insn && this_insn == label
11063 && insns_skipped < max_insns_skipped)
11065 if (jump_clobbers)
11067 arm_ccfsm_state = 2;
11068 this_insn = next_nonnote_insn (this_insn);
11070 else
11071 arm_ccfsm_state = 1;
11072 succeed = TRUE;
11074 else
11075 fail = TRUE;
11077 break;
11079 case JUMP_INSN:
11080 /* If this is an unconditional branch to the same label, succeed.
11081 If it is to another label, do nothing. If it is conditional,
11082 fail. */
11083 /* XXX Probably, the tests for SET and the PC are
11084 unnecessary. */
11086 scanbody = PATTERN (this_insn);
11087 if (GET_CODE (scanbody) == SET
11088 && GET_CODE (SET_DEST (scanbody)) == PC)
11090 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11091 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11093 arm_ccfsm_state = 2;
11094 succeed = TRUE;
11096 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11097 fail = TRUE;
11099 /* Fail if a conditional return is undesirable (e.g. on a
11100 StrongARM), but still allow this if optimizing for size. */
11101 else if (GET_CODE (scanbody) == RETURN
11102 && !use_return_insn (TRUE, NULL)
11103 && !optimize_size)
11104 fail = TRUE;
11105 else if (GET_CODE (scanbody) == RETURN
11106 && seeking_return)
11108 arm_ccfsm_state = 2;
11109 succeed = TRUE;
11111 else if (GET_CODE (scanbody) == PARALLEL)
11113 switch (get_attr_conds (this_insn))
11115 case CONDS_NOCOND:
11116 break;
11117 default:
11118 fail = TRUE;
11119 break;
11122 else
11123 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11125 break;
11127 case INSN:
11128 /* Instructions using or affecting the condition codes make it
11129 fail. */
11130 scanbody = PATTERN (this_insn);
11131 if (!(GET_CODE (scanbody) == SET
11132 || GET_CODE (scanbody) == PARALLEL)
11133 || get_attr_conds (this_insn) != CONDS_NOCOND)
11134 fail = TRUE;
11136 /* A conditional Cirrus instruction must be followed by
11137 a non-Cirrus instruction. However, this function
11138 conditionalizes instructions, and by the time we get
11139 here we can no longer add instructions (nops), because
11140 shorten_branches() has already been called; so, to be
11141 safe, we do not conditionalize Cirrus instructions
11142 at all. */
11143 if (GET_CODE (scanbody) != USE
11144 && GET_CODE (scanbody) != CLOBBER
11145 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11146 fail = TRUE;
11147 break;
11149 default:
11150 break;
11153 if (succeed)
11155 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11156 arm_target_label = CODE_LABEL_NUMBER (label);
11157 else if (seeking_return || arm_ccfsm_state == 2)
11159 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11161 this_insn = next_nonnote_insn (this_insn);
11162 if (this_insn && (GET_CODE (this_insn) == BARRIER
11163 || GET_CODE (this_insn) == CODE_LABEL))
11164 abort ();
11166 if (!this_insn)
11168 /* Oh dear! We ran off the end... give up. */
11169 recog (PATTERN (insn), insn, NULL);
11170 arm_ccfsm_state = 0;
11171 arm_target_insn = NULL;
11172 return;
11174 arm_target_insn = this_insn;
11176 else
11177 abort ();
11178 if (jump_clobbers)
11180 if (reverse)
11181 abort ();
11182 arm_current_cc =
11183 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11184 0), 0), 1));
11185 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11186 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11187 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11188 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11190 else
11192 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11193 what it was. */
11194 if (!reverse)
11195 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11196 0));
11199 if (reverse || then_not_else)
11200 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11203 /* Restore recog_data (getting the attributes of other insns can
11204 destroy this array, but final.c assumes that it remains intact
11205 across this call; since the insn has been recognized already we
11206 call recog directly). */
11207 recog (PATTERN (insn), insn, NULL);
11211 /* Returns true if REGNO is a valid register
11212 for holding a quantity of type MODE. */
11213 int
11214 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11216 if (GET_MODE_CLASS (mode) == MODE_CC)
11217 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11219 if (TARGET_THUMB)
11220 /* For the Thumb we only allow values bigger than SImode in
11221 registers 0 - 6, so that there is always a second low
11222 register available to hold the upper part of the value.
11223 We probably ought to ensure that the register is the
11224 start of an even numbered register pair. */
11225 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11227 if (IS_CIRRUS_REGNUM (regno))
11228 /* We have outlawed SI values in Cirrus registers because they
11229 reside in the lower 32 bits, but SF values reside in the
11230 upper 32 bits. This causes gcc all sorts of grief. We can't
11231 even split the registers into pairs because Cirrus SI values
11232 get sign-extended to 64 bits -- aldyh. */
11233 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11235 if (IS_VFP_REGNUM (regno))
11237 if (mode == SFmode || mode == SImode)
11238 return TRUE;
11240 /* DFmode values are only valid in even register pairs. */
11241 if (mode == DFmode)
11242 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11243 return FALSE;
11246 if (IS_IWMMXT_GR_REGNUM (regno))
11247 return mode == SImode;
11249 if (IS_IWMMXT_REGNUM (regno))
11250 return VALID_IWMMXT_REG_MODE (mode);
11252 if (regno <= LAST_ARM_REGNUM)
11253 /* We allow any value to be stored in the general registers. */
11254 return 1;
11256 if ( regno == FRAME_POINTER_REGNUM
11257 || regno == ARG_POINTER_REGNUM)
11258 /* We only allow integers in the fake hard registers. */
11259 return GET_MODE_CLASS (mode) == MODE_INT;
11261 /* The only registers left are the FPA registers
11262 which we only allow to hold FP values. */
11263 return GET_MODE_CLASS (mode) == MODE_FLOAT
11264 && regno >= FIRST_FPA_REGNUM
11265 && regno <= LAST_FPA_REGNUM;
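/* The DFmode test just above accepts only even-numbered VFP registers: a
   double occupies a consecutive (even, odd) pair of single-precision
   registers, so s0/s1 form d0, s2/s3 form d1, and so on.  A minimal sketch of
   the predicate (illustrative only; compiled out):  */
#if 0
static int
vfp_df_regno_ok (unsigned int regno, unsigned int first_vfp_regnum)
{
  /* The pair must start on an even single-precision register.  */
  return ((regno - first_vfp_regnum) & 1) == 0;
}
#endif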
11268 enum reg_class
11269 arm_regno_class (int regno)
11271 if (TARGET_THUMB)
11273 if (regno == STACK_POINTER_REGNUM)
11274 return STACK_REG;
11275 if (regno == CC_REGNUM)
11276 return CC_REG;
11277 if (regno < 8)
11278 return LO_REGS;
11279 return HI_REGS;
11282 if ( regno <= LAST_ARM_REGNUM
11283 || regno == FRAME_POINTER_REGNUM
11284 || regno == ARG_POINTER_REGNUM)
11285 return GENERAL_REGS;
11287 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11288 return NO_REGS;
11290 if (IS_CIRRUS_REGNUM (regno))
11291 return CIRRUS_REGS;
11293 if (IS_VFP_REGNUM (regno))
11294 return VFP_REGS;
11296 if (IS_IWMMXT_REGNUM (regno))
11297 return IWMMXT_REGS;
11299 if (IS_IWMMXT_GR_REGNUM (regno))
11300 return IWMMXT_GR_REGS;
11302 return FPA_REGS;
11305 /* Handle a special case when computing the offset
11306 of an argument from the frame pointer. */
11307 int
11308 arm_debugger_arg_offset (int value, rtx addr)
11310 rtx insn;
11312 /* We are only interested in the case where dbxout_parms() failed to compute the offset. */
11313 if (value != 0)
11314 return 0;
11316 /* We can only cope with the case where the address is held in a register. */
11317 if (GET_CODE (addr) != REG)
11318 return 0;
11320 /* If we are using the frame pointer to point at the argument, then
11321 an offset of 0 is correct. */
11322 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11323 return 0;
11325 /* If we are using the stack pointer to point at the
11326 argument, then an offset of 0 is correct. */
11327 if ((TARGET_THUMB || !frame_pointer_needed)
11328 && REGNO (addr) == SP_REGNUM)
11329 return 0;
11331 /* Oh dear. The argument is pointed to by a register rather
11332 than being held in a register, or being stored at a known
11333 offset from the frame pointer. Since GDB only understands
11334 those two kinds of argument we must translate the address
11335 held in the register into an offset from the frame pointer.
11336 We do this by searching through the insns for the function
11337 looking to see where this register gets its value. If the
11338 register is initialized from the frame pointer plus an offset
11339 then we are in luck and we can continue, otherwise we give up.
11341 This code is exercised by producing debugging information
11342 for a function with arguments like this:
11344 double func (double a, double b, int c, double d) {return d;}
11346 Without this code the stab for parameter 'd' will be set to
11347 an offset of 0 from the frame pointer, rather than 8. */
11349 /* The if() statement says:
11351 If the insn is a normal instruction
11352 and if the insn is setting the value in a register
11353 and if the register being set is the register holding the address of the argument
11354 and if the address is computed by an addition
11355 that involves adding to a register
11356 which is the frame pointer
11357 a constant integer
11359 then... */
11361 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11363 if ( GET_CODE (insn) == INSN
11364 && GET_CODE (PATTERN (insn)) == SET
11365 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11366 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11367 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11368 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11369 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11372 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11374 break;
11378 if (value == 0)
11380 debug_rtx (addr);
11381 warning ("unable to compute real location of stacked parameter");
11382 value = 8; /* XXX magic hack */
11385 return value;
11388 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11389 do \
11391 if ((MASK) & insn_flags) \
11392 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
11394 while (0)
11396 struct builtin_description
11398 const unsigned int mask;
11399 const enum insn_code icode;
11400 const char * const name;
11401 const enum arm_builtins code;
11402 const enum rtx_code comparison;
11403 const unsigned int flag;
11406 static const struct builtin_description bdesc_2arg[] =
11408 #define IWMMXT_BUILTIN(code, string, builtin) \
11409 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11410 ARM_BUILTIN_##builtin, 0, 0 },
11412 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11413 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11414 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11415 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11416 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11417 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11418 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11419 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11420 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11421 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11422 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11423 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11424 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11425 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11426 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11427 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11428 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11429 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11430 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11431 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11432 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11433 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11434 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11435 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11436 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11437 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11438 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11439 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11440 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11441 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11442 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11443 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11444 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11445 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11446 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11447 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11448 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11449 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11450 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11451 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11452 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11453 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11454 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11455 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11456 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11457 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11458 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11459 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11460 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11461 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11462 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11463 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11464 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11465 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11466 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11467 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11468 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11469 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11471 #define IWMMXT_BUILTIN2(code, builtin) \
11472 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11474 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11475 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11476 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11477 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11478 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11479 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11480 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11481 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11482 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11483 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11484 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11485 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11486 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11487 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11488 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11489 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11490 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11491 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11492 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11493 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11494 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11495 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11496 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11497 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11498 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11499 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11500 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11501 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11502 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11503 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11504 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11505 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11508 static const struct builtin_description bdesc_1arg[] =
11510 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11511 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11512 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11513 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11514 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11515 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11516 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11517 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11518 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11519 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11520 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11521 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11522 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11523 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11524 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11525 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11526 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11527 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11530 /* Set up all the iWMMXt builtins. This is
11531 not called if TARGET_IWMMXT is zero. */
11533 static void
11534 arm_init_iwmmxt_builtins (void)
11536 const struct builtin_description * d;
11537 size_t i;
11538 tree endlink = void_list_node;
11540 tree int_ftype_int
11541 = build_function_type (integer_type_node,
11542 tree_cons (NULL_TREE, integer_type_node, endlink));
11543 tree v8qi_ftype_v8qi_v8qi_int
11544 = build_function_type (V8QI_type_node,
11545 tree_cons (NULL_TREE, V8QI_type_node,
11546 tree_cons (NULL_TREE, V8QI_type_node,
11547 tree_cons (NULL_TREE,
11548 integer_type_node,
11549 endlink))));
11550 tree v4hi_ftype_v4hi_int
11551 = build_function_type (V4HI_type_node,
11552 tree_cons (NULL_TREE, V4HI_type_node,
11553 tree_cons (NULL_TREE, integer_type_node,
11554 endlink)));
11555 tree v2si_ftype_v2si_int
11556 = build_function_type (V2SI_type_node,
11557 tree_cons (NULL_TREE, V2SI_type_node,
11558 tree_cons (NULL_TREE, integer_type_node,
11559 endlink)));
11560 tree v2si_ftype_di_di
11561 = build_function_type (V2SI_type_node,
11562 tree_cons (NULL_TREE, long_long_integer_type_node,
11563 tree_cons (NULL_TREE, long_long_integer_type_node,
11564 endlink)));
11565 tree di_ftype_di_int
11566 = build_function_type (long_long_integer_type_node,
11567 tree_cons (NULL_TREE, long_long_integer_type_node,
11568 tree_cons (NULL_TREE, integer_type_node,
11569 endlink)));
11570 tree di_ftype_di_int_int
11571 = build_function_type (long_long_integer_type_node,
11572 tree_cons (NULL_TREE, long_long_integer_type_node,
11573 tree_cons (NULL_TREE, integer_type_node,
11574 tree_cons (NULL_TREE,
11575 integer_type_node,
11576 endlink))));
11577 tree int_ftype_v8qi
11578 = build_function_type (integer_type_node,
11579 tree_cons (NULL_TREE, V8QI_type_node,
11580 endlink));
11581 tree int_ftype_v4hi
11582 = build_function_type (integer_type_node,
11583 tree_cons (NULL_TREE, V4HI_type_node,
11584 endlink));
11585 tree int_ftype_v2si
11586 = build_function_type (integer_type_node,
11587 tree_cons (NULL_TREE, V2SI_type_node,
11588 endlink));
11589 tree int_ftype_v8qi_int
11590 = build_function_type (integer_type_node,
11591 tree_cons (NULL_TREE, V8QI_type_node,
11592 tree_cons (NULL_TREE, integer_type_node,
11593 endlink)));
11594 tree int_ftype_v4hi_int
11595 = build_function_type (integer_type_node,
11596 tree_cons (NULL_TREE, V4HI_type_node,
11597 tree_cons (NULL_TREE, integer_type_node,
11598 endlink)));
11599 tree int_ftype_v2si_int
11600 = build_function_type (integer_type_node,
11601 tree_cons (NULL_TREE, V2SI_type_node,
11602 tree_cons (NULL_TREE, integer_type_node,
11603 endlink)));
11604 tree v8qi_ftype_v8qi_int_int
11605 = build_function_type (V8QI_type_node,
11606 tree_cons (NULL_TREE, V8QI_type_node,
11607 tree_cons (NULL_TREE, integer_type_node,
11608 tree_cons (NULL_TREE,
11609 integer_type_node,
11610 endlink))));
11611 tree v4hi_ftype_v4hi_int_int
11612 = build_function_type (V4HI_type_node,
11613 tree_cons (NULL_TREE, V4HI_type_node,
11614 tree_cons (NULL_TREE, integer_type_node,
11615 tree_cons (NULL_TREE,
11616 integer_type_node,
11617 endlink))));
11618 tree v2si_ftype_v2si_int_int
11619 = build_function_type (V2SI_type_node,
11620 tree_cons (NULL_TREE, V2SI_type_node,
11621 tree_cons (NULL_TREE, integer_type_node,
11622 tree_cons (NULL_TREE,
11623 integer_type_node,
11624 endlink))));
11625 /* Miscellaneous. */
11626 tree v8qi_ftype_v4hi_v4hi
11627 = build_function_type (V8QI_type_node,
11628 tree_cons (NULL_TREE, V4HI_type_node,
11629 tree_cons (NULL_TREE, V4HI_type_node,
11630 endlink)));
11631 tree v4hi_ftype_v2si_v2si
11632 = build_function_type (V4HI_type_node,
11633 tree_cons (NULL_TREE, V2SI_type_node,
11634 tree_cons (NULL_TREE, V2SI_type_node,
11635 endlink)));
11636 tree v2si_ftype_v4hi_v4hi
11637 = build_function_type (V2SI_type_node,
11638 tree_cons (NULL_TREE, V4HI_type_node,
11639 tree_cons (NULL_TREE, V4HI_type_node,
11640 endlink)));
11641 tree v2si_ftype_v8qi_v8qi
11642 = build_function_type (V2SI_type_node,
11643 tree_cons (NULL_TREE, V8QI_type_node,
11644 tree_cons (NULL_TREE, V8QI_type_node,
11645 endlink)));
11646 tree v4hi_ftype_v4hi_di
11647 = build_function_type (V4HI_type_node,
11648 tree_cons (NULL_TREE, V4HI_type_node,
11649 tree_cons (NULL_TREE,
11650 long_long_integer_type_node,
11651 endlink)));
11652 tree v2si_ftype_v2si_di
11653 = build_function_type (V2SI_type_node,
11654 tree_cons (NULL_TREE, V2SI_type_node,
11655 tree_cons (NULL_TREE,
11656 long_long_integer_type_node,
11657 endlink)));
11658 tree void_ftype_int_int
11659 = build_function_type (void_type_node,
11660 tree_cons (NULL_TREE, integer_type_node,
11661 tree_cons (NULL_TREE, integer_type_node,
11662 endlink)));
11663 tree di_ftype_void
11664 = build_function_type (long_long_unsigned_type_node, endlink);
11665 tree di_ftype_v8qi
11666 = build_function_type (long_long_integer_type_node,
11667 tree_cons (NULL_TREE, V8QI_type_node,
11668 endlink));
11669 tree di_ftype_v4hi
11670 = build_function_type (long_long_integer_type_node,
11671 tree_cons (NULL_TREE, V4HI_type_node,
11672 endlink));
11673 tree di_ftype_v2si
11674 = build_function_type (long_long_integer_type_node,
11675 tree_cons (NULL_TREE, V2SI_type_node,
11676 endlink));
11677 tree v2si_ftype_v4hi
11678 = build_function_type (V2SI_type_node,
11679 tree_cons (NULL_TREE, V4HI_type_node,
11680 endlink));
11681 tree v4hi_ftype_v8qi
11682 = build_function_type (V4HI_type_node,
11683 tree_cons (NULL_TREE, V8QI_type_node,
11684 endlink));
11686 tree di_ftype_di_v4hi_v4hi
11687 = build_function_type (long_long_unsigned_type_node,
11688 tree_cons (NULL_TREE,
11689 long_long_unsigned_type_node,
11690 tree_cons (NULL_TREE, V4HI_type_node,
11691 tree_cons (NULL_TREE,
11692 V4HI_type_node,
11693 endlink))));
11695 tree di_ftype_v4hi_v4hi
11696 = build_function_type (long_long_unsigned_type_node,
11697 tree_cons (NULL_TREE, V4HI_type_node,
11698 tree_cons (NULL_TREE, V4HI_type_node,
11699 endlink)));
11701 /* Normal vector binops. */
11702 tree v8qi_ftype_v8qi_v8qi
11703 = build_function_type (V8QI_type_node,
11704 tree_cons (NULL_TREE, V8QI_type_node,
11705 tree_cons (NULL_TREE, V8QI_type_node,
11706 endlink)));
11707 tree v4hi_ftype_v4hi_v4hi
11708 = build_function_type (V4HI_type_node,
11709 tree_cons (NULL_TREE, V4HI_type_node,
11710 tree_cons (NULL_TREE, V4HI_type_node,
11711 endlink)));
11712 tree v2si_ftype_v2si_v2si
11713 = build_function_type (V2SI_type_node,
11714 tree_cons (NULL_TREE, V2SI_type_node,
11715 tree_cons (NULL_TREE, V2SI_type_node,
11716 endlink)));
11717 tree di_ftype_di_di
11718 = build_function_type (long_long_unsigned_type_node,
11719 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11720 tree_cons (NULL_TREE,
11721 long_long_unsigned_type_node,
11722 endlink)));
11724 /* Add all builtins that are more or less simple operations on two
11725 operands. */
11726 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11728 /* Use one of the operands; the target can have a different mode for
11729 mask-generating compares. */
11730 enum machine_mode mode;
11731 tree type;
11733 if (d->name == 0)
11734 continue;
11736 mode = insn_data[d->icode].operand[1].mode;
11738 switch (mode)
11740 case V8QImode:
11741 type = v8qi_ftype_v8qi_v8qi;
11742 break;
11743 case V4HImode:
11744 type = v4hi_ftype_v4hi_v4hi;
11745 break;
11746 case V2SImode:
11747 type = v2si_ftype_v2si_v2si;
11748 break;
11749 case DImode:
11750 type = di_ftype_di_di;
11751 break;
11753 default:
11754 abort ();
11757 def_mbuiltin (d->mask, d->name, type, d->code);
11760 /* Add the remaining iWMMXt insns with somewhat more complicated types. */
11761 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11762 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11763 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11765 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11766 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11767 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11768 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11769 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11770 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11772 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11773 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11774 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11775 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11776 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11777 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11779 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11780 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11781 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11782 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11783 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11784 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11786 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11787 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11789 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11791 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11795 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11796 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11797 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11798 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11800 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11801 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11802 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11803 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11804 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11805 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11806 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11807 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11808 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11810 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11811 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11812 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11814 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11815 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11816 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11818 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11819 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11820 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11821 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11822 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11823 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11825 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11826 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11827 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11828 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11829 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11830 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11831 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11832 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11833 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11834 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11835 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11836 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11838 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11839 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11840 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11841 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11843 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11844 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11845 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11846 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11847 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11848 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11849 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
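/* Illustrative note, not part of the original source: each def_mbuiltin
   call above registers one iWMMXt intrinsic when FL_IWMMXT is present,
   so user code such as

     long long acc = __builtin_arm_waccb (v);

   (with v of vector type V8QI) resolves to ARM_BUILTIN_WACCB and is
   expanded by arm_expand_builtin below.  */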
11852 static void
11853 arm_init_builtins (void)
11855 if (TARGET_REALLY_IWMMXT)
11856 arm_init_iwmmxt_builtins ();
11859 /* Errors in the source file can cause expand_expr to return const0_rtx
11860 where we expect a vector. To avoid crashing, use one of the vector
11861 clear instructions. */
11863 static rtx
11864 safe_vector_operand (rtx x, enum machine_mode mode)
11866 if (x != const0_rtx)
11867 return x;
11868 x = gen_reg_rtx (mode);
11870 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11871 : gen_rtx_SUBREG (DImode, x, 0)));
11872 return x;
11875 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11877 static rtx
11878 arm_expand_binop_builtin (enum insn_code icode,
11879 tree arglist, rtx target)
11881 rtx pat;
11882 tree arg0 = TREE_VALUE (arglist);
11883 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11884 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11885 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11886 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11887 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11888 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11890 if (VECTOR_MODE_P (mode0))
11891 op0 = safe_vector_operand (op0, mode0);
11892 if (VECTOR_MODE_P (mode1))
11893 op1 = safe_vector_operand (op1, mode1);
11895 if (! target
11896 || GET_MODE (target) != tmode
11897 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11898 target = gen_reg_rtx (tmode);
11900 /* In case the insn wants input operands in modes different from
11901 the result, abort. */
11902 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11903 abort ();
11905 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11906 op0 = copy_to_mode_reg (mode0, op0);
11907 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11908 op1 = copy_to_mode_reg (mode1, op1);
11910 pat = GEN_FCN (icode) (target, op0, op1);
11911 if (! pat)
11912 return 0;
11913 emit_insn (pat);
11914 return target;
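/* Sketch of the flow above, for illustration only: expanding
   __builtin_arm_wsadb (a, b) arrives here with CODE_FOR_iwmmxt_wsadb;
   a and b are copied into V8QImode registers if the operand predicates
   reject them, a V2SImode target is allocated if none was supplied,
   and a single wsadb pattern is emitted via GEN_FCN.  */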
11917 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11919 static rtx
11920 arm_expand_unop_builtin (enum insn_code icode,
11921 tree arglist, rtx target, int do_load)
11923 rtx pat;
11924 tree arg0 = TREE_VALUE (arglist);
11925 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11926 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11927 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11929 if (! target
11930 || GET_MODE (target) != tmode
11931 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11932 target = gen_reg_rtx (tmode);
11933 if (do_load)
11934 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11935 else
11937 if (VECTOR_MODE_P (mode0))
11938 op0 = safe_vector_operand (op0, mode0);
11940 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11941 op0 = copy_to_mode_reg (mode0, op0);
11944 pat = GEN_FCN (icode) (target, op0);
11945 if (! pat)
11946 return 0;
11947 emit_insn (pat);
11948 return target;
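/* Illustrative note: when DO_LOAD is nonzero the single argument is
   treated as a pointer, so the code above forces it into a Pmode
   register and wraps it in a MEM of the insn's input mode instead of
   running the vector-operand checks.  */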
11951 /* Expand an expression EXP that calls a built-in function,
11952 with result going to TARGET if that's convenient
11953 (and in mode MODE if that's convenient).
11954 SUBTARGET may be used as the target for computing one of EXP's operands.
11955 IGNORE is nonzero if the value is to be ignored. */
11957 static rtx
11958 arm_expand_builtin (tree exp,
11959 rtx target,
11960 rtx subtarget ATTRIBUTE_UNUSED,
11961 enum machine_mode mode ATTRIBUTE_UNUSED,
11962 int ignore ATTRIBUTE_UNUSED)
11964 const struct builtin_description * d;
11965 enum insn_code icode;
11966 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11967 tree arglist = TREE_OPERAND (exp, 1);
11968 tree arg0;
11969 tree arg1;
11970 tree arg2;
11971 rtx op0;
11972 rtx op1;
11973 rtx op2;
11974 rtx pat;
11975 int fcode = DECL_FUNCTION_CODE (fndecl);
11976 size_t i;
11977 enum machine_mode tmode;
11978 enum machine_mode mode0;
11979 enum machine_mode mode1;
11980 enum machine_mode mode2;
11982 switch (fcode)
11984 case ARM_BUILTIN_TEXTRMSB:
11985 case ARM_BUILTIN_TEXTRMUB:
11986 case ARM_BUILTIN_TEXTRMSH:
11987 case ARM_BUILTIN_TEXTRMUH:
11988 case ARM_BUILTIN_TEXTRMSW:
11989 case ARM_BUILTIN_TEXTRMUW:
11990 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11991 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11992 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11993 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11994 : CODE_FOR_iwmmxt_textrmw);
11996 arg0 = TREE_VALUE (arglist);
11997 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11998 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11999 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12000 tmode = insn_data[icode].operand[0].mode;
12001 mode0 = insn_data[icode].operand[1].mode;
12002 mode1 = insn_data[icode].operand[2].mode;
12004 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12005 op0 = copy_to_mode_reg (mode0, op0);
12006 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12008 /* @@@ better error message */
12009 error ("selector must be an immediate");
12010 return gen_reg_rtx (tmode);
12012 if (target == 0
12013 || GET_MODE (target) != tmode
12014 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12015 target = gen_reg_rtx (tmode);
12016 pat = GEN_FCN (icode) (target, op0, op1);
12017 if (! pat)
12018 return 0;
12019 emit_insn (pat);
12020 return target;
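/* For instance (illustration only): __builtin_arm_textrmsb (v, 2)
   extracts byte lane 2 of v, sign-extended to int.  The lane selector
   must be a literal constant, which is why a non-immediate second
   operand is diagnosed above rather than forced into a register.  */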
12022 case ARM_BUILTIN_TINSRB:
12023 case ARM_BUILTIN_TINSRH:
12024 case ARM_BUILTIN_TINSRW:
12025 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12026 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12027 : CODE_FOR_iwmmxt_tinsrw);
12028 arg0 = TREE_VALUE (arglist);
12029 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12030 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12031 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12032 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12033 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12034 tmode = insn_data[icode].operand[0].mode;
12035 mode0 = insn_data[icode].operand[1].mode;
12036 mode1 = insn_data[icode].operand[2].mode;
12037 mode2 = insn_data[icode].operand[3].mode;
12039 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12040 op0 = copy_to_mode_reg (mode0, op0);
12041 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12042 op1 = copy_to_mode_reg (mode1, op1);
12043 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12045 /* @@@ better error message */
12046 error ("selector must be an immediate");
12047 return const0_rtx;
12049 if (target == 0
12050 || GET_MODE (target) != tmode
12051 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12052 target = gen_reg_rtx (tmode);
12053 pat = GEN_FCN (icode) (target, op0, op1, op2);
12054 if (! pat)
12055 return 0;
12056 emit_insn (pat);
12057 return target;
12059 case ARM_BUILTIN_SETWCX:
12060 arg0 = TREE_VALUE (arglist);
12061 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12062 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12063 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12064 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12065 return 0;
12067 case ARM_BUILTIN_GETWCX:
12068 arg0 = TREE_VALUE (arglist);
12069 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12070 target = gen_reg_rtx (SImode);
12071 emit_insn (gen_iwmmxt_tmrc (target, op0));
12072 return target;
12074 case ARM_BUILTIN_WSHUFH:
12075 icode = CODE_FOR_iwmmxt_wshufh;
12076 arg0 = TREE_VALUE (arglist);
12077 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12078 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12079 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12080 tmode = insn_data[icode].operand[0].mode;
12081 mode1 = insn_data[icode].operand[1].mode;
12082 mode2 = insn_data[icode].operand[2].mode;
12084 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12085 op0 = copy_to_mode_reg (mode1, op0);
12086 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12088 /* @@@ better error message */
12089 error ("mask must be an immediate");
12090 return const0_rtx;
12092 if (target == 0
12093 || GET_MODE (target) != tmode
12094 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12095 target = gen_reg_rtx (tmode);
12096 pat = GEN_FCN (icode) (target, op0, op1);
12097 if (! pat)
12098 return 0;
12099 emit_insn (pat);
12100 return target;
12102 case ARM_BUILTIN_WSADB:
12103 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12104 case ARM_BUILTIN_WSADH:
12105 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12106 case ARM_BUILTIN_WSADBZ:
12107 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12108 case ARM_BUILTIN_WSADHZ:
12109 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12111 /* Several three-argument builtins. */
12112 case ARM_BUILTIN_WMACS:
12113 case ARM_BUILTIN_WMACU:
12114 case ARM_BUILTIN_WALIGN:
12115 case ARM_BUILTIN_TMIA:
12116 case ARM_BUILTIN_TMIAPH:
12117 case ARM_BUILTIN_TMIATT:
12118 case ARM_BUILTIN_TMIATB:
12119 case ARM_BUILTIN_TMIABT:
12120 case ARM_BUILTIN_TMIABB:
12121 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12122 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12123 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12124 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12125 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12126 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12127 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12128 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12129 : CODE_FOR_iwmmxt_walign);
12130 arg0 = TREE_VALUE (arglist);
12131 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12132 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12133 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12134 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12135 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12136 tmode = insn_data[icode].operand[0].mode;
12137 mode0 = insn_data[icode].operand[1].mode;
12138 mode1 = insn_data[icode].operand[2].mode;
12139 mode2 = insn_data[icode].operand[3].mode;
12141 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12142 op0 = copy_to_mode_reg (mode0, op0);
12143 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12144 op1 = copy_to_mode_reg (mode1, op1);
12145 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12146 op2 = copy_to_mode_reg (mode2, op2);
12147 if (target == 0
12148 || GET_MODE (target) != tmode
12149 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12150 target = gen_reg_rtx (tmode);
12151 pat = GEN_FCN (icode) (target, op0, op1, op2);
12152 if (! pat)
12153 return 0;
12154 emit_insn (pat);
12155 return target;
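/* For example (illustration only): __builtin_arm_tmia (acc, x, y)
   reaches this case and expands to the tmia pattern, a 32x32->64-bit
   multiply of x and y accumulated into the DImode value acc.  */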
12157 case ARM_BUILTIN_WZERO:
12158 target = gen_reg_rtx (DImode);
12159 emit_insn (gen_iwmmxt_clrdi (target));
12160 return target;
12162 default:
12163 break;
12166 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12167 if (d->code == (const enum arm_builtins) fcode)
12168 return arm_expand_binop_builtin (d->icode, arglist, target);
12170 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12171 if (d->code == (const enum arm_builtins) fcode)
12172 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12174 /* @@@ Should really do something sensible here. */
12175 return NULL_RTX;
12178 /* Recursively search through all of the blocks in a function
12179 checking to see if any of the variables created in that
12180 function match the RTX called 'orig'. If they do then
12181 replace them with the RTX called 'new'. */
12182 static void
12183 replace_symbols_in_block (tree block, rtx orig, rtx new)
12185 for (; block; block = BLOCK_CHAIN (block))
12187 tree sym;
12189 if (!TREE_USED (block))
12190 continue;
12192 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12194 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12195 || DECL_IGNORED_P (sym)
12196 || TREE_CODE (sym) != VAR_DECL
12197 || DECL_EXTERNAL (sym)
12198 || !rtx_equal_p (DECL_RTL (sym), orig)
12200 continue;
12202 SET_DECL_RTL (sym, new);
12205 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12209 /* Return the number (counting from 0) of
12210 the least significant set bit in MASK. */
12212 inline static int
12213 number_of_first_bit_set (int mask)
12215 int bit;
12217 for (bit = 0;
12218 (mask & (1 << bit)) == 0;
12219 ++bit)
12220 continue;
12222 return bit;
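/* Worked example, for illustration: number_of_first_bit_set (0x28)
   returns 3, since 0x28 is binary 101000 and bit 3 is its least
   significant set bit.  */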
12225 /* Generate code to return from a thumb function.
12226 If 'reg_containing_return_addr' is -1, then the return address is
12227 actually on the stack, at the stack pointer. */
12228 static void
12229 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
12231 unsigned regs_available_for_popping;
12232 unsigned regs_to_pop;
12233 int pops_needed;
12234 unsigned available;
12235 unsigned required;
12236 int mode;
12237 int size;
12238 int restore_a4 = FALSE;
12240 /* Compute the registers we need to pop. */
12241 regs_to_pop = 0;
12242 pops_needed = 0;
12244 /* There is an assumption here, that if eh_ofs is not NULL, the
12245 normal return address will have been pushed. */
12246 if (reg_containing_return_addr == -1 || eh_ofs)
12248 /* When we are generating a return for __builtin_eh_return,
12249 reg_containing_return_addr must specify the return regno. */
12250 if (eh_ofs && reg_containing_return_addr == -1)
12251 abort ();
12253 regs_to_pop |= 1 << LR_REGNUM;
12254 ++pops_needed;
12257 if (TARGET_BACKTRACE)
12259 /* Restore the (ARM) frame pointer and stack pointer. */
12260 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12261 pops_needed += 2;
12264 /* If there is nothing to pop then just emit the BX instruction and
12265 return. */
12266 if (pops_needed == 0)
12268 if (eh_ofs)
12269 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12271 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12272 return;
12274 /* Otherwise if we are not supporting interworking and we have not created
12275 a backtrace structure and the function was not entered in ARM mode then
12276 just pop the return address straight into the PC. */
12277 else if (!TARGET_INTERWORK
12278 && !TARGET_BACKTRACE
12279 && !is_called_in_ARM_mode (current_function_decl))
12281 if (eh_ofs)
12283 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
12284 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12285 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12287 else
12288 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12290 return;
12293 /* Find out how many of the (return) argument registers we can corrupt. */
12294 regs_available_for_popping = 0;
12296 /* If returning via __builtin_eh_return, the bottom three registers
12297 all contain information needed for the return. */
12298 if (eh_ofs)
12299 size = 12;
12300 else
12302 #ifdef RTX_CODE
12303 /* We can deduce the registers used from the function's
12304 return value. This is more reliable than examining
12305 regs_ever_live[] because that will be set if the register is
12306 ever used in the function, not just if the register is used
12307 to hold a return value. */
12309 if (current_function_return_rtx != 0)
12310 mode = GET_MODE (current_function_return_rtx);
12311 else
12312 #endif
12313 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12315 size = GET_MODE_SIZE (mode);
12317 if (size == 0)
12319 /* In a void function we can use any argument register.
12320 In a function that returns a structure on the stack
12321 we can use the second and third argument registers. */
12322 if (mode == VOIDmode)
12323 regs_available_for_popping =
12324 (1 << ARG_REGISTER (1))
12325 | (1 << ARG_REGISTER (2))
12326 | (1 << ARG_REGISTER (3));
12327 else
12328 regs_available_for_popping =
12329 (1 << ARG_REGISTER (2))
12330 | (1 << ARG_REGISTER (3));
12332 else if (size <= 4)
12333 regs_available_for_popping =
12334 (1 << ARG_REGISTER (2))
12335 | (1 << ARG_REGISTER (3));
12336 else if (size <= 8)
12337 regs_available_for_popping =
12338 (1 << ARG_REGISTER (3));
12341 /* Match registers to be popped with registers into which we pop them. */
12342 for (available = regs_available_for_popping,
12343 required = regs_to_pop;
12344 required != 0 && available != 0;
12345 available &= ~(available & - available),
12346 required &= ~(required & - required))
12347 --pops_needed;
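/* Illustrative note: the step expressions above clear the least
   significant set bit of each mask on every iteration, because in
   two's complement arithmetic (x & -x) isolates that bit.  E.g. with
   available == 0xC, available & -available == 0x4, so one iteration
   leaves 0x8.  */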
12349 /* If we have any popping registers left over, remove them. */
12350 if (available > 0)
12351 regs_available_for_popping &= ~available;
12353 /* Otherwise if we need another popping register we can use
12354 the fourth argument register. */
12355 else if (pops_needed)
12357 /* If we have not found any free argument registers and
12358 reg a4 contains the return address, we must move it. */
12359 if (regs_available_for_popping == 0
12360 && reg_containing_return_addr == LAST_ARG_REGNUM)
12362 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12363 reg_containing_return_addr = LR_REGNUM;
12365 else if (size > 12)
12367 /* Register a4 is being used to hold part of the return value,
12368 but we have dire need of a free, low register. */
12369 restore_a4 = TRUE;
12371 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12374 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12376 /* The fourth argument register is available. */
12377 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12379 --pops_needed;
12383 /* Pop as many registers as we can. */
12384 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12385 regs_available_for_popping);
12387 /* Process the registers we popped. */
12388 if (reg_containing_return_addr == -1)
12390 /* The return address was popped into the lowest numbered register. */
12391 regs_to_pop &= ~(1 << LR_REGNUM);
12393 reg_containing_return_addr =
12394 number_of_first_bit_set (regs_available_for_popping);
12396 /* Remove this register from the mask of available registers, so that
12397 the return address will not be corrupted by further pops. */
12398 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12401 /* If we popped other registers then handle them here. */
12402 if (regs_available_for_popping)
12404 int frame_pointer;
12406 /* Work out which register currently contains the frame pointer. */
12407 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12409 /* Move it into the correct place. */
12410 asm_fprintf (f, "\tmov\t%r, %r\n",
12411 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12413 /* (Temporarily) remove it from the mask of popped registers. */
12414 regs_available_for_popping &= ~(1 << frame_pointer);
12415 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12417 if (regs_available_for_popping)
12419 int stack_pointer;
12421 /* We popped the stack pointer as well,
12422 find the register that contains it. */
12423 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12425 /* Move it into the stack register. */
12426 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12428 /* At this point we have popped all necessary registers, so
12429 do not worry about restoring regs_available_for_popping
12430 to its correct value:
12432 assert (pops_needed == 0)
12433 assert (regs_available_for_popping == (1 << frame_pointer))
12434 assert (regs_to_pop == (1 << STACK_POINTER)) */
12436 else
12438 /* Since we have just moved the popped value into the frame
12439 pointer, the popping register is available for reuse, and
12440 we know that we still have the stack pointer left to pop. */
12441 regs_available_for_popping |= (1 << frame_pointer);
12445 /* If we still have registers left on the stack, but we no longer have
12446 any registers into which we can pop them, then we must move the return
12447 address into the link register and make available the register that
12448 contained it. */
12449 if (regs_available_for_popping == 0 && pops_needed > 0)
12451 regs_available_for_popping |= 1 << reg_containing_return_addr;
12453 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12454 reg_containing_return_addr);
12456 reg_containing_return_addr = LR_REGNUM;
12459 /* If we have registers left on the stack then pop some more.
12460 We know that at most we will want to pop FP and SP. */
12461 if (pops_needed > 0)
12463 int popped_into;
12464 int move_to;
12466 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12467 regs_available_for_popping);
12469 /* We have popped either FP or SP.
12470 Move whichever one it is into the correct register. */
12471 popped_into = number_of_first_bit_set (regs_available_for_popping);
12472 move_to = number_of_first_bit_set (regs_to_pop);
12474 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12476 regs_to_pop &= ~(1 << move_to);
12478 --pops_needed;
12481 /* If we still have not popped everything then we must have only
12482 had one register available to us and we are now popping the SP. */
12483 if (pops_needed > 0)
12485 int popped_into;
12487 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12488 regs_available_for_popping);
12490 popped_into = number_of_first_bit_set (regs_available_for_popping);
12492 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12494 assert (regs_to_pop == (1 << STACK_POINTER))
12495 assert (pops_needed == 1)
12499 /* If necessary restore the a4 register. */
12500 if (restore_a4)
12502 if (reg_containing_return_addr != LR_REGNUM)
12504 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12505 reg_containing_return_addr = LR_REGNUM;
12508 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12511 if (eh_ofs)
12512 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12514 /* Return to caller. */
12515 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12518 /* Emit code to push or pop registers to or from the stack. F is the
12519 assembly file. MASK is the registers to push or pop. PUSH is
12520 nonzero if we should push, and zero if we should pop. For debugging
12521 output, if pushing, adjust CFA_OFFSET by the amount of space added
12522 to the stack. REAL_REGS should have the same number of bits set as
12523 MASK, and will be used instead (in the same order) to describe which
12524 registers were saved - this is used to mark the save slots when we
12525 push high registers after moving them to low registers. */
12526 static void
12527 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12529 int regno;
12530 int lo_mask = mask & 0xFF;
12531 int pushed_words = 0;
12533 if (lo_mask == 0 && !push && (mask & (1 << 15)))
12535 /* Special case. Do not generate a POP PC statement here, do it in
12536 thumb_exit() */
12537 thumb_exit (f, -1, NULL_RTX);
12538 return;
12541 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12543 /* Look at the low registers first. */
12544 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12546 if (lo_mask & 1)
12548 asm_fprintf (f, "%r", regno);
12550 if ((lo_mask & ~1) != 0)
12551 fprintf (f, ", ");
12553 pushed_words++;
12557 if (push && (mask & (1 << LR_REGNUM)))
12559 /* Catch pushing the LR. */
12560 if (mask & 0xFF)
12561 fprintf (f, ", ");
12563 asm_fprintf (f, "%r", LR_REGNUM);
12565 pushed_words++;
12567 else if (!push && (mask & (1 << PC_REGNUM)))
12569 /* Catch popping the PC. */
12570 if (TARGET_INTERWORK || TARGET_BACKTRACE)
12572 /* The PC is never popped directly; instead
12573 it is popped into r3 and then BX is used. */
12574 fprintf (f, "}\n");
12576 thumb_exit (f, -1, NULL_RTX);
12578 return;
12580 else
12582 if (mask & 0xFF)
12583 fprintf (f, ", ");
12585 asm_fprintf (f, "%r", PC_REGNUM);
12589 fprintf (f, "}\n");
12591 if (push && pushed_words && dwarf2out_do_frame ())
12593 char *l = dwarf2out_cfi_label ();
12594 int pushed_mask = real_regs;
12596 *cfa_offset += pushed_words * 4;
12597 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12599 pushed_words = 0;
12600 pushed_mask = real_regs;
12601 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12603 if (pushed_mask & 1)
12604 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
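/* Illustrative note: the offsets passed to dwarf2out_reg_save above
   are negative because the save slots lie below the new CFA.  With
   two words pushed, the first register is recorded at -*cfa_offset
   and the second at 4 - *cfa_offset.  */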
12609 void
12610 thumb_final_prescan_insn (rtx insn)
12612 if (flag_print_asm_name)
12613 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12614 INSN_ADDRESSES (INSN_UID (insn)));
12618 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12620 unsigned HOST_WIDE_INT mask = 0xff;
12621 int i;
12623 if (val == 0) /* XXX */
12624 return 0;
12626 for (i = 0; i < 25; i++)
12627 if ((val & (mask << i)) == val)
12628 return 1;
12630 return 0;
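/* Examples, for illustration only: 0xff00 is accepted (it is 0xff
   shifted left by 8), while 0x101 is rejected because bits 0 and 8
   never fit inside a single 8-bit window together.  */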
12633 /* Returns nonzero if the current function contains,
12634 or might contain a far jump. */
12636 thumb_far_jump_used_p (int in_prologue)
12638 rtx insn;
12640 /* This test is only important for leaf functions. */
12641 /* assert (!leaf_function_p ()); */
12643 /* If we have already decided that far jumps may be used,
12644 do not bother checking again, and always return true even if
12645 it turns out that they are not being used. Once we have made
12646 the decision that far jumps are present (and that hence the link
12647 register will be pushed onto the stack) we cannot go back on it. */
12648 if (cfun->machine->far_jump_used)
12649 return 1;
12651 /* If this function is not being called from the prologue/epilogue
12652 generation code then it must be being called from the
12653 INITIAL_ELIMINATION_OFFSET macro. */
12654 if (!in_prologue)
12656 /* In this case we know that we are being asked about the elimination
12657 of the arg pointer register. If that register is not being used,
12658 then there are no arguments on the stack, and we do not have to
12659 worry that a far jump might force the prologue to push the link
12660 register, changing the stack offsets. In this case we can just
12661 return false, since the presence of far jumps in the function will
12662 not affect stack offsets.
12664 If the arg pointer is live (or if it was live, but has now been
12665 eliminated and so set to dead) then we do have to test to see if
12666 the function might contain a far jump. This test can lead to some
12667 false positives, since before reload is completed the length of
12668 branch instructions is not known, so gcc defaults to returning their
12669 longest length, which in turn sets the far jump attribute to true.
12671 A false positive will not result in bad code being generated, but it
12672 will result in a needless push and pop of the link register. We
12673 hope that this does not occur too often. */
12674 if (regs_ever_live [ARG_POINTER_REGNUM])
12675 cfun->machine->arg_pointer_live = 1;
12676 else if (!cfun->machine->arg_pointer_live)
12677 return 0;
12680 /* Check to see if the function contains a branch
12681 insn with the far jump attribute set. */
12682 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12684 if (GET_CODE (insn) == JUMP_INSN
12685 /* Ignore tablejump patterns. */
12686 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12687 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12688 && get_attr_far_jump (insn) == FAR_JUMP_YES
12691 /* Record the fact that we have decided that
12692 the function does use far jumps. */
12693 cfun->machine->far_jump_used = 1;
12694 return 1;
12698 return 0;
12701 /* Return nonzero if FUNC must be entered in ARM mode. */
12703 is_called_in_ARM_mode (tree func)
12705 if (TREE_CODE (func) != FUNCTION_DECL)
12706 abort ();
12708 /* Ignore the problem of functions whose address is taken. */
12709 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12710 return TRUE;
12712 #ifdef ARM_PE
12713 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12714 #else
12715 return FALSE;
12716 #endif
12719 /* The bits which aren't usefully expanded as rtl. */
12720 const char *
12721 thumb_unexpanded_epilogue (void)
12723 int regno;
12724 int live_regs_mask = 0;
12725 int high_regs_pushed = 0;
12726 int leaf_function = leaf_function_p ();
12727 int had_to_push_lr;
12728 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
12730 if (return_used_this_function)
12731 return "";
12733 if (IS_NAKED (arm_current_func_type ()))
12734 return "";
12736 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12737 if (THUMB_REG_PUSHED_P (regno))
12738 live_regs_mask |= 1 << regno;
12740 for (regno = 8; regno < 13; regno++)
12741 if (THUMB_REG_PUSHED_P (regno))
12742 high_regs_pushed++;
12744 /* The prologue may have pushed some high registers to use as
12745 work registers, e.g. the testsuite file:
12746 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12747 compiles to produce:
12748 push {r4, r5, r6, r7, lr}
12749 mov r7, r9
12750 mov r6, r8
12751 push {r6, r7}
12752 as part of the prologue. We have to undo that pushing here. */
12754 if (high_regs_pushed)
12756 int mask = live_regs_mask;
12757 int next_hi_reg;
12758 int size;
12759 int mode;
12761 #ifdef RTX_CODE
12762 /* We can deduce the registers used from the function's return value.
12763 This is more reliable than examining regs_ever_live[] because that
12764 will be set if the register is ever used in the function, not just if
12765 the register is used to hold a return value. */
12767 if (current_function_return_rtx != 0)
12768 mode = GET_MODE (current_function_return_rtx);
12769 else
12770 #endif
12771 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12773 size = GET_MODE_SIZE (mode);
12775 /* Unless we are returning a type of size > 12, register r3 is
12776 available. */
12777 if (size < 13)
12778 mask |= 1 << 3;
12780 if (mask == 0)
12781 /* Oh dear! We have no low registers into which we can pop
12782 high registers! */
12783 internal_error
12784 ("no low registers available for popping high registers");
12786 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12787 if (THUMB_REG_PUSHED_P (next_hi_reg))
12788 break;
12790 while (high_regs_pushed)
12792 /* Find lo register(s) into which the high register(s) can
12793 be popped. */
12794 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12796 if (mask & (1 << regno))
12797 high_regs_pushed--;
12798 if (high_regs_pushed == 0)
12799 break;
12802 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12804 /* Pop the values into the low register(s). */
12805 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12807 /* Move the value(s) into the high registers. */
12808 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12810 if (mask & (1 << regno))
12812 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12813 regno);
12815 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12816 if (THUMB_REG_PUSHED_P (next_hi_reg))
12817 break;
12823 had_to_push_lr = (live_regs_mask || !leaf_function
12824 || thumb_far_jump_used_p (1));
12826 if (TARGET_BACKTRACE
12827 && ((live_regs_mask & 0xFF) == 0)
12828 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12830 /* The stack backtrace structure creation code had to
12831 push R7 in order to get a work register, so we pop
12832 it now. */
12833 live_regs_mask |= (1 << LAST_LO_REGNUM);
12836 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12838 if (had_to_push_lr
12839 && !is_called_in_ARM_mode (current_function_decl)
12840 && !eh_ofs)
12841 live_regs_mask |= 1 << PC_REGNUM;
12843 /* Either no argument registers were pushed or a backtrace
12844 structure was created which includes an adjusted stack
12845 pointer, so just pop everything. */
12846 if (live_regs_mask)
12847 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12848 live_regs_mask);
12850 if (eh_ofs)
12851 thumb_exit (asm_out_file, 2, eh_ofs);
12852 /* We have either just popped the return address into the
12853 PC, or it was kept in LR for the entire function, or
12854 it is still on the stack because we do not want to
12855 return by doing a pop {pc}. */
12856 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12857 thumb_exit (asm_out_file,
12858 (had_to_push_lr
12859 && is_called_in_ARM_mode (current_function_decl)) ?
12860 -1 : LR_REGNUM, NULL_RTX);
12862 else
12864 /* Pop everything but the return address. */
12865 live_regs_mask &= ~(1 << PC_REGNUM);
12867 if (live_regs_mask)
12868 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12869 live_regs_mask);
12871 if (had_to_push_lr)
12872 /* Get the return address into a temporary register. */
12873 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12874 1 << LAST_ARG_REGNUM);
12876 /* Remove the argument registers that were pushed onto the stack. */
12877 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12878 SP_REGNUM, SP_REGNUM,
12879 current_function_pretend_args_size);
12881 if (eh_ofs)
12882 thumb_exit (asm_out_file, 2, eh_ofs);
12883 else
12884 thumb_exit (asm_out_file,
12885 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12888 return "";
12891 /* Functions to save and restore machine-specific function data. */
12892 static struct machine_function *
12893 arm_init_machine_status (void)
12895 struct machine_function *machine;
12896 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12898 #if ARM_FT_UNKNOWN != 0
12899 machine->func_type = ARM_FT_UNKNOWN;
12900 #endif
12901 return machine;
12904 /* Return an RTX indicating where the return address to the
12905 calling function can be found. */
12907 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12909 if (count != 0)
12910 return NULL_RTX;
12912 if (TARGET_APCS_32)
12913 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12914 else
12916 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12917 GEN_INT (RETURN_ADDR_MASK26));
12918 return get_func_hard_reg_initial_val (cfun, lr);
12922 /* Do anything needed before RTL is emitted for each function. */
12923 void
12924 arm_init_expanders (void)
12926 /* Arrange to initialize and mark the machine per-function status. */
12927 init_machine_status = arm_init_machine_status;
12930 HOST_WIDE_INT
12931 thumb_get_frame_size (void)
12933 int regno;
12935 int base_size = ROUND_UP_WORD (get_frame_size ());
12936 int count_regs = 0;
12937 int entry_size = 0;
12938 int leaf;
12940 if (! TARGET_THUMB)
12941 abort ();
12943 if (! TARGET_ATPCS)
12944 return base_size;
12946 /* We need to know if we are a leaf function. Unfortunately, it
12947 is possible to be called after start_sequence has been called,
12948 which causes get_insns to return the insns for the sequence,
12949 not the function, which will cause leaf_function_p to return
12950 the incorrect result.
12952 To work around this, we cache the computed frame size. This
12953 works because we will only be calling RTL expanders that need
12954 to know about leaf functions once reload has completed, and the
12955 frame size cannot be changed after that time, so we can safely
12956 use the cached value. */
12958 if (reload_completed)
12959 return cfun->machine->frame_size;
12961 leaf = leaf_function_p ();
12963 /* A leaf function does not need any stack alignment if it has nothing
12964 on the stack. */
12965 if (leaf && base_size == 0)
12967 cfun->machine->frame_size = 0;
12968 return 0;
12971 /* We know that SP will be word aligned on entry, and we must
12972 preserve that condition at any subroutine call. But those are
12973 the only constraints. */
12975 /* Space for variadic functions. */
12976 if (current_function_pretend_args_size)
12977 entry_size += current_function_pretend_args_size;
12979 /* Space for pushed lo registers. */
12980 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12981 if (THUMB_REG_PUSHED_P (regno))
12982 count_regs++;
12984 /* Space for backtrace structure. */
12985 if (TARGET_BACKTRACE)
12987 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12988 entry_size += 20;
12989 else
12990 entry_size += 16;
12993 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12994 count_regs++; /* LR */
12996 entry_size += count_regs * 4;
12997 count_regs = 0;
12999 /* Space for pushed hi regs. */
13000 for (regno = 8; regno < 13; regno++)
13001 if (THUMB_REG_PUSHED_P (regno))
13002 count_regs++;
13004 entry_size += count_regs * 4;
13006 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
13007 base_size += 4;
13008 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
13009 abort ();
13011 cfun->machine->frame_size = base_size;
13013 return base_size;
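/* Worked example, for illustration: with base_size == 20,
   entry_size == 16 and no outgoing arguments, 20 + 16 is not a
   multiple of 8, so base_size is bumped to 24 and the subsequent
   8-byte alignment check passes.  */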
13016 /* Generate the rest of a function's prologue. */
13017 void
13018 thumb_expand_prologue (void)
13020 rtx insn, dwarf;
13022 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13023 + current_function_outgoing_args_size);
13024 unsigned long func_type;
13026 func_type = arm_current_func_type ();
13028 /* Naked functions don't have prologues. */
13029 if (IS_NAKED (func_type))
13030 return;
13032 if (IS_INTERRUPT (func_type))
13034 error ("interrupt Service Routines cannot be coded in Thumb mode");
13035 return;
13038 if (frame_pointer_needed)
13040 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
13041 RTX_FRAME_RELATED_P (insn) = 1;
13044 if (amount)
13046 amount = ROUND_UP_WORD (amount);
13048 if (amount < 512)
13050 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13051 GEN_INT (- amount)));
13052 RTX_FRAME_RELATED_P (insn) = 1;
13054 else
13056 int regno;
13057 rtx reg;
13059 /* The stack decrement is too big for an immediate value in a single
13060 insn. In theory we could issue multiple subtracts, but after
13061 three of them it becomes more space efficient to place the full
13062 value in the constant pool and load into a register. (Also the
13063 ARM debugger really likes to see only one stack decrement per
13064 function). So instead we look for a scratch register into which
13065 we can load the decrement, and then we subtract this from the
13066 stack pointer. Unfortunately on the thumb the only available
13067 scratch registers are the argument registers, and we cannot use
13068 these as they may hold arguments to the function. Instead we
13069 attempt to locate a call preserved register which is used by this
13070 function. If we can find one, then we know that it will have
13071 been pushed at the start of the prologue and so we can corrupt
13072 it now. */
13073 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13074 if (THUMB_REG_PUSHED_P (regno)
13075 && !(frame_pointer_needed
13076 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13077 break;
13079 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13081 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13083 /* Choose an arbitrary, non-argument low register. */
13084 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13086 /* Save it by copying it into a high, scratch register. */
13087 emit_insn (gen_movsi (spare, reg));
13088 /* Add a USE to stop propagate_one_insn() from barfing. */
13089 emit_insn (gen_prologue_use (spare));
13091 /* Decrement the stack. */
13092 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13093 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13094 stack_pointer_rtx, reg));
13095 RTX_FRAME_RELATED_P (insn) = 1;
13096 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13097 plus_constant (stack_pointer_rtx,
13098 GEN_INT (- amount)));
13099 RTX_FRAME_RELATED_P (dwarf) = 1;
13100 REG_NOTES (insn)
13101 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13102 REG_NOTES (insn));
13104 /* Restore the low register's original value. */
13105 emit_insn (gen_movsi (reg, spare));
13107 /* Emit a USE of the restored scratch register, so that flow
13108 analysis will not consider the restore redundant. The
13109 register won't be used again in this function and isn't
13110 restored by the epilogue. */
13111 emit_insn (gen_prologue_use (reg));
13113 else
13115 reg = gen_rtx_REG (SImode, regno);
13117 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13119 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13120 stack_pointer_rtx, reg));
13121 RTX_FRAME_RELATED_P (insn) = 1;
13122 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13123 plus_constant (stack_pointer_rtx,
13124 GEN_INT (- amount)));
13125 RTX_FRAME_RELATED_P (dwarf) = 1;
13126 REG_NOTES (insn)
13127 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13128 REG_NOTES (insn));
13133 if (current_function_profile || TARGET_NO_SCHED_PRO)
13134 emit_insn (gen_blockage ());
13137 void
13138 thumb_expand_epilogue (void)
13140 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13141 + current_function_outgoing_args_size);
13142 int regno;
13144 /* Naked functions don't have epilogues. */
13145 if (IS_NAKED (arm_current_func_type ()))
13146 return;
13148 if (frame_pointer_needed)
13149 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13150 else if (amount)
13152 amount = ROUND_UP_WORD (amount);
13154 if (amount < 512)
13155 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13156 GEN_INT (amount)));
13157 else
13159 /* r3 is always free in the epilogue. */
13160 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13162 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13163 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13167 /* Emit a USE (stack_pointer_rtx), so that
13168 the stack adjustment will not be deleted. */
13169 emit_insn (gen_prologue_use (stack_pointer_rtx));
13171 if (current_function_profile || TARGET_NO_SCHED_PRO)
13172 emit_insn (gen_blockage ());
13174 /* Emit a clobber for each insn that will be restored in the epilogue,
13175 so that flow2 will get register lifetimes correct. */
13176 for (regno = 0; regno < 13; regno++)
13177 if (regs_ever_live[regno] && !call_used_regs[regno])
13178 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13180 if (! regs_ever_live[LR_REGNUM])
13181 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13184 static void
13185 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13187 int live_regs_mask = 0;
13188 int high_regs_pushed = 0;
13189 int cfa_offset = 0;
13190 int regno;
13192 if (IS_NAKED (arm_current_func_type ()))
13193 return;
13195 if (is_called_in_ARM_mode (current_function_decl))
13197 const char * name;
13199 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13200 abort ();
13201 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13202 abort ();
13203 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13205 /* Generate code sequence to switch us into Thumb mode. */
13206 /* The .code 32 directive has already been emitted by
13207 ASM_DECLARE_FUNCTION_NAME. */
13208 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13209 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13211 /* Generate a label, so that the debugger will notice the
13212 change in instruction sets. This label is also used by
13213 the assembler to bypass the ARM code when this function
13214 is called from a Thumb encoded function elsewhere in the
13215 same file. Hence the definition of STUB_NAME here must
13216 agree with the definition in gas/config/tc-arm.c. */
13218 #define STUB_NAME ".real_start_of"
13220 fprintf (f, "\t.code\t16\n");
13221 #ifdef ARM_PE
13222 if (arm_dllexport_name_p (name))
13223 name = arm_strip_name_encoding (name);
13224 #endif
13225 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13226 fprintf (f, "\t.thumb_func\n");
13227 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13230 if (current_function_pretend_args_size)
13232 if (cfun->machine->uses_anonymous_args)
13234 int num_pushes;
13236 fprintf (f, "\tpush\t{");
13238 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13240 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13241 regno <= LAST_ARG_REGNUM;
13242 regno++)
13243 asm_fprintf (f, "%r%s", regno,
13244 regno == LAST_ARG_REGNUM ? "" : ", ");
13246 fprintf (f, "}\n");
13248 else
13249 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13250 SP_REGNUM, SP_REGNUM,
13251 current_function_pretend_args_size);
13253 /* We don't need to record the stores for unwinding (would it
13254 help the debugger any if we did?), but record the change in
13255 the stack pointer. */
13256 if (dwarf2out_do_frame ())
13258 char *l = dwarf2out_cfi_label ();
13259 cfa_offset = cfa_offset + current_function_pretend_args_size;
13260 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13264 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13265 if (THUMB_REG_PUSHED_P (regno))
13266 live_regs_mask |= 1 << regno;
13268 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
13269 live_regs_mask |= 1 << LR_REGNUM;
13271 if (TARGET_BACKTRACE)
13273 int offset;
13274 int work_register = 0;
13275 int wr;
13277 /* We have been asked to create a stack backtrace structure.
13278 The code looks like this:
13280 0 .align 2
13281 0 func:
13282 0 sub SP, #16 Reserve space for 4 registers.
13283 2 push {R7} Get a work register.
13284 4 add R7, SP, #20 Get the stack pointer before the push.
13285 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13286 8 mov R7, PC Get hold of the start of this code plus 12.
13287 10 str R7, [SP, #16] Store it.
13288 12 mov R7, FP Get hold of the current frame pointer.
13289 14 str R7, [SP, #4] Store it.
13290 16 mov R7, LR Get hold of the current return address.
13291 18 str R7, [SP, #12] Store it.
13292 20 add R7, SP, #16 Point at the start of the backtrace structure.
13293 22 mov FP, R7 Put this value into the frame pointer. */
13295 if ((live_regs_mask & 0xFF) == 0)
13297 /* See if the a4 register is free. */
13299 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
13300 work_register = LAST_ARG_REGNUM;
13301 else /* We must push a register of our own. */
13302 live_regs_mask |= (1 << LAST_LO_REGNUM);
13305 if (work_register == 0)
13307 /* Select a register from the list that will be pushed to
13308 use as our work register. */
13309 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
13310 if ((1 << work_register) & live_regs_mask)
13311 break;
13314 asm_fprintf
13315 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13316 SP_REGNUM, SP_REGNUM);
13318 if (dwarf2out_do_frame ())
13320 char *l = dwarf2out_cfi_label ();
13321 cfa_offset = cfa_offset + 16;
13322 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13325 if (live_regs_mask)
13326 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13328 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
13329 if (wr & live_regs_mask)
13330 offset += 4;
13332 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13333 offset + 16 + current_function_pretend_args_size);
13335 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13336 offset + 4);
13338 /* Make sure that the instruction fetching the PC is in the right place
13339 to calculate "start of backtrace creation code + 12". */
13340 if (live_regs_mask)
13342 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13343 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13344 offset + 12);
13345 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13346 ARM_HARD_FRAME_POINTER_REGNUM);
13347 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13348 offset);
13350 else
13352 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13353 ARM_HARD_FRAME_POINTER_REGNUM);
13354 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13355 offset);
13356 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13357 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13358 offset + 12);
13361 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13362 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13363 offset + 8);
13364 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13365 offset + 12);
13366 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13367 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13369 else if (live_regs_mask)
13370 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13372 for (regno = 8; regno < 13; regno++)
13373 if (THUMB_REG_PUSHED_P (regno))
13374 high_regs_pushed++;
13376 if (high_regs_pushed)
13378 int pushable_regs = 0;
13379 int mask = live_regs_mask & 0xff;
13380 int next_hi_reg;
13382 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13383 if (THUMB_REG_PUSHED_P (next_hi_reg))
13384 break;
13386 pushable_regs = mask;
13388 if (pushable_regs == 0)
13390 /* Desperation time -- this probably will never happen. */
13391 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
13392 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13393 mask = 1 << LAST_ARG_REGNUM;
13396 while (high_regs_pushed > 0)
13398 int real_regs_mask = 0;
13400 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13402 if (mask & (1 << regno))
13404 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13406 high_regs_pushed--;
13407 real_regs_mask |= (1 << next_hi_reg);
13409 if (high_regs_pushed)
13411 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13412 next_hi_reg--)
13413 if (THUMB_REG_PUSHED_P (next_hi_reg))
13414 break;
13416 else
13418 mask &= ~((1 << regno) - 1);
13419 break;
13424 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
13427 if (pushable_regs == 0
13428 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
13429 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13433 /* Handle the case of a double word load into a low register from
13434 a computed memory address. The computed address may involve a
13435 register which is overwritten by the load. */
13436 const char *
13437 thumb_load_double_from_address (rtx *operands)
13439 rtx addr;
13440 rtx base;
13441 rtx offset;
13442 rtx arg1;
13443 rtx arg2;
13445 if (GET_CODE (operands[0]) != REG)
13446 abort ();
13448 if (GET_CODE (operands[1]) != MEM)
13449 abort ();
13451 /* Get the memory address. */
13452 addr = XEXP (operands[1], 0);
13454 /* Work out how the memory address is computed. */
13455 switch (GET_CODE (addr))
13457 case REG:
13458 operands[2] = gen_rtx_MEM (SImode,
13459 plus_constant (XEXP (operands[1], 0), 4));
13461 if (REGNO (operands[0]) == REGNO (addr))
13463 output_asm_insn ("ldr\t%H0, %2", operands);
13464 output_asm_insn ("ldr\t%0, %1", operands);
13466 else
13468 output_asm_insn ("ldr\t%0, %1", operands);
13469 output_asm_insn ("ldr\t%H0, %2", operands);
13471 break;
13473 case CONST:
13474 /* Compute <address> + 4 for the high order load. */
13475 operands[2] = gen_rtx_MEM (SImode,
13476 plus_constant (XEXP (operands[1], 0), 4));
13478 output_asm_insn ("ldr\t%0, %1", operands);
13479 output_asm_insn ("ldr\t%H0, %2", operands);
13480 break;
13482 case PLUS:
13483 arg1 = XEXP (addr, 0);
13484 arg2 = XEXP (addr, 1);
13486 if (CONSTANT_P (arg1))
13487 base = arg2, offset = arg1;
13488 else
13489 base = arg1, offset = arg2;
13491 if (GET_CODE (base) != REG)
13492 abort ();
13494 /* Catch the case of <address> = <reg> + <reg>. */
13495 if (GET_CODE (offset) == REG)
13497 int reg_offset = REGNO (offset);
13498 int reg_base = REGNO (base);
13499 int reg_dest = REGNO (operands[0]);
13501 /* Add the base and offset registers together into the
13502 higher destination register. */
13503 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13504 reg_dest + 1, reg_base, reg_offset);
13506 /* Load the lower destination register from the address in
13507 the higher destination register. */
13508 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13509 reg_dest, reg_dest + 1);
13511 /* Load the higher destination register from its own address
13512 plus 4. */
13513 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13514 reg_dest + 1, reg_dest + 1);
13516 else
13518 /* Compute <address> + 4 for the high order load. */
13519 operands[2] = gen_rtx_MEM (SImode,
13520 plus_constant (XEXP (operands[1], 0), 4));
13522 /* If the computed address is held in the low order register
13523 then load the high order register first, otherwise always
13524 load the low order register first. */
13525 if (REGNO (operands[0]) == REGNO (base))
13527 output_asm_insn ("ldr\t%H0, %2", operands);
13528 output_asm_insn ("ldr\t%0, %1", operands);
13530 else
13532 output_asm_insn ("ldr\t%0, %1", operands);
13533 output_asm_insn ("ldr\t%H0, %2", operands);
13536 break;
13538 case LABEL_REF:
13539 /* With no registers to worry about we can just load the value
13540 directly. */
13541 operands[2] = gen_rtx_MEM (SImode,
13542 plus_constant (XEXP (operands[1], 0), 4));
13544 output_asm_insn ("ldr\t%H0, %2", operands);
13545 output_asm_insn ("ldr\t%0, %1", operands);
13546 break;
13548 default:
13549 abort ();
13550 break;
13553 return "";
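/* Overlap example, for illustration only: loading r0:r1 from the
   address held in r0 takes the first branch of the REG case and
   emits

     ldr r1, [r0, #4]
     ldr r0, [r0]

   so the base register is not clobbered until its final use.  */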
13556 const char *
13557 thumb_output_move_mem_multiple (int n, rtx *operands)
13559 rtx tmp;
13561 switch (n)
13563 case 2:
13564 if (REGNO (operands[4]) > REGNO (operands[5]))
13566 tmp = operands[4];
13567 operands[4] = operands[5];
13568 operands[5] = tmp;
13570 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13571 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13572 break;
13574 case 3:
13575 if (REGNO (operands[4]) > REGNO (operands[5]))
13577 tmp = operands[4];
13578 operands[4] = operands[5];
13579 operands[5] = tmp;
13581 if (REGNO (operands[5]) > REGNO (operands[6]))
13583 tmp = operands[5];
13584 operands[5] = operands[6];
13585 operands[6] = tmp;
13587 if (REGNO (operands[4]) > REGNO (operands[5]))
13589 tmp = operands[4];
13590 operands[4] = operands[5];
13591 operands[5] = tmp;
13594 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13595 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13596 break;
13598 default:
13599 abort ();
13602 return "";
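/* Note, for illustration: the swaps above are a small bubble sort
   that puts the scratch registers into ascending order, since the
   register lists of ldmia/stmia must be listed lowest first.  */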
13605 /* Routines for generating rtl. */
13606 void
13607 thumb_expand_movstrqi (rtx *operands)
13609 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13610 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13611 HOST_WIDE_INT len = INTVAL (operands[2]);
13612 HOST_WIDE_INT offset = 0;
13614 while (len >= 12)
13616 emit_insn (gen_movmem12b (out, in, out, in));
13617 len -= 12;
13620 if (len >= 8)
13622 emit_insn (gen_movmem8b (out, in, out, in));
13623 len -= 8;
13626 if (len >= 4)
13628 rtx reg = gen_reg_rtx (SImode);
13629 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13630 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13631 len -= 4;
13632 offset += 4;
13635 if (len >= 2)
13637 rtx reg = gen_reg_rtx (HImode);
13638 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13639 plus_constant (in, offset))));
13640 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13641 reg));
13642 len -= 2;
13643 offset += 2;
13646 if (len)
13648 rtx reg = gen_reg_rtx (QImode);
13649 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13650 plus_constant (in, offset))));
13651 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13652 reg));
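/* Illustrative walk-through: a 23-byte copy emits one movmem12b
   (12 bytes) and one movmem8b (8 bytes), both of which post-increment
   the pointers, then a halfword move at offset 0 and a byte move at
   offset 2 for the remaining 3 bytes.  */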
13657 thumb_cmp_operand (rtx op, enum machine_mode mode)
13659 return ((GET_CODE (op) == CONST_INT
13660 && INTVAL (op) < 256
13661 && INTVAL (op) >= 0)
13662 || s_register_operand (op, mode));
13666 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
13668 return (GET_CODE (op) == CONST_INT
13669 && INTVAL (op) < 0
13670 && INTVAL (op) > -256);
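/* Examples, for illustration: thumb_cmp_operand accepts constants in
   the range 0..255 (the Thumb CMP immediate), while
   thumb_cmpneg_operand accepts -255..-1, which can be compared by
   emitting CMN with the negated value instead.  */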
/* Return TRUE if a result can be stored in OP without clobbering the
   condition code register.  Prior to reload we only accept a
   register.  After reload we have to be able to handle memory as
   well, since a pseudo may not get a hard reg and reload cannot
   handle output-reloads on jump insns.

   We could possibly handle mem before reload as well, but that might
   complicate things with the need to handle increment
   side-effects.  */
int
thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
{
  return (s_register_operand (op, mode)
	  || ((reload_in_progress || reload_completed)
	      && memory_operand (op, mode)));
}
/* Handle storing a half-word to memory during reload.  */
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  abort ();
}
/* Return the length of a function name prefix
   that starts with the character C.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
      ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}
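/* Example (hypothetical name): if '*' is one of the single-character
   prefixes listed in ARM_NAME_ENCODING_LENGTHS, then
   arm_strip_name_encoding ("*foo") skips it and returns "foo";
   stacked prefixes are stripped one per loop iteration.  */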
/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
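/* Example (hypothetical symbols): each distinct symbol is given one
   4-byte slot in the x$adcons pool, so the first request for "foo"
   yields x$adcons + 0, a following request for "bar" yields
   x$adcons + 4, and a repeated request for "foo" finds the existing
   chain entry and yields x$adcons + 0 again.  */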
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}
int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
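/* Example output (assuming flag_pic is set for the code section):

	AREA |C$$code1|, CODE, READONLY, PIC, REENTRANT
	AREA |C$$data1|, DATA

   The counter embedded in the AREA name is bumped on every call, so
   each switch starts a fresh AREA.  */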
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}
void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}
int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
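/* Example (hypothetical symbols): if "foo" and "bar" are still on
   imports_list at the end of compilation, this emits

	IMPORT	foo
	IMPORT	bar

   consuming the list as it goes.  */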
static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}
static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
#ifdef OBJECT_FORMAT_ELF
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.

   Differs from the default elf version only in the prefix character
   used before the section type.  */
static void
arm_elf_asm_named_section (const char *name, unsigned int flags)
{
  char flagchars[10], *f = flagchars;

  if (! named_section_first_declaration (name))
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  if (!(flags & SECTION_DEBUG))
    *f++ = 'a';
  if (flags & SECTION_WRITE)
    *f++ = 'w';
  if (flags & SECTION_CODE)
    *f++ = 'x';
  if (flags & SECTION_SMALL)
    *f++ = 's';
  if (flags & SECTION_MERGE)
    *f++ = 'M';
  if (flags & SECTION_STRINGS)
    *f++ = 'S';
  if (flags & SECTION_TLS)
    *f++ = 'T';
  *f = '\0';

  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);

  if (!(flags & SECTION_NOTYPE))
    {
      const char *type;

      if (flags & SECTION_BSS)
	type = "nobits";
      else
	type = "progbits";

      fprintf (asm_out_file, ",%%%s", type);

      if (flags & SECTION_ENTSIZE)
	fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
    }

  putc ('\n', asm_out_file);
}
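/* Example (hypothetical section name): the first switch to a writable
   data section emits

	.section .my_data,"aw",%progbits

   where the default ELF hook would emit "@progbits"; '@' introduces a
   comment in ARM assembler syntax, hence the '%' prefix.  */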
#endif
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
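/* Example (ARM mode, hypothetical target name): a thunk with
   DELTA == 4 for a function with `this' in r0 emits

	add	r0, r0, #4
	b	_ZN1D1fEv(PLT)

   with the "(PLT)" suffix only when NEED_PLT_RELOC holds.  The loop
   above splits larger deltas into 8-bit chunks at even bit positions,
   matching the immediate encoding of ARM add/sub.  */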
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
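/* Example: a V4HImode CONST_VECTOR with elements {1, 2, 3, 4} prints
   as 0x0004000300020001 -- the loop walks the elements from the
   highest index down to 0, so the last element appears first.  */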
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
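/* Example expansion (hypothetical registers) for a load of wcgr0 from
   [r1, #2048], which is outside wldrw's (-1024, 1024) offset range:

	str	r1, [sp, #-4]!	@ Start of GR load expansion
	ldr	r1, [r1, #2048]
	tmcr	wcgr0, r1
	ldr	r1, [sp], #4	@ End of GR load expansion

   The base register doubles as the temporary and is preserved on the
   stack around the sequence.  */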
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */
static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
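/* Example (assuming NUM_ARG_REGS is 4 and UNITS_PER_WORD is 4, as on
   ARM): for a function such as f (int a, ...), CUM->nregs is 1 here,
   so *PRETEND_SIZE becomes (4 - 1) * 4 = 12 bytes and the prologue
   pushes r1-r3 so that the anonymous arguments are contiguous with
   any arguments already passed on the stack.  */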
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */
int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
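/* Example (hypothetical registers): if PRODUCER sets r1 and CONSUMER
   is "str r1, [r2]", only the stored value depends on r1 and the
   function returns nonzero; for "str r3, [r1]" the address
   calculation needs r1, so it returns 0.  */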
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */
int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}
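/* Example (hypothetical registers): for a CONSUMER such as
   "add r0, r3, r1, lsl r2" the early operands are the shifted value
   r1 and the shift amount r2; a PRODUCER writing either one makes
   this return 0.  */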
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */
int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}
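/* Example (hypothetical registers): for "add r0, r3, r1, lsl r2" only
   the shifted value r1 counts here, so a PRODUCER writing just the
   shift amount r2 still returns nonzero.  */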
/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */
int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
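/* Example (hypothetical registers): for a CONSUMER such as
   "mla r0, r2, r3, r1" the pattern is (plus (mult r2 r3) r1); a
   PRODUCER writing only the accumulator r1 gives a nonzero result,
   while one writing r2 or r3 feeds the multiply itself and gives 0.
   A plain multiply (no PLUS) always returns 0.  */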