/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, HOST_WIDE_INT,
			     rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_multi_reg_push (int);
static rtx emit_sfm (int, int);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static void print_multi_reg (FILE *, const char *, int, int);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static int number_of_first_bit_set (int);
static void replace_symbols_in_block (tree, rtx, rtx);
static void thumb_exit (FILE *, int, rtx);
static void thumb_pushpop (FILE *, int, int, int *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int arm_use_dfa_pipeline_interface (void);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_named_section (const char *, unsigned int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
#endif
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
/* Initialize the GCC target structure.  */
#ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef  TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE arm_use_dfa_pipeline_interface

#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* What program mode is the cpu running in?  26-bit mode or 32-bit mode.  */
enum prog_mode_type arm_prgmode;

/* Set by the -mfpu=... option.  */
const char * target_fpu_name = NULL;

/* Set by the -mfpe=... option.  */
const char * target_fpe_name = NULL;

/* Set by the -mfloat-abi=... option.  */
const char * target_float_abi_name = NULL;

/* Used to parse the -mstructure-size-boundary command line option.  */
const char * structure_size_string = NULL;
int    arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus.  */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply.  */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support.  */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support.  */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4.  */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5.  */
#define FL_THUMB      (1 << 6)        /* Thumb aware.  */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary.  */
#define FL_STRONG     (1 << 8)        /* StrongARM.  */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5.  */
#define FL_XSCALE     (1 << 10)       /* XScale.  */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_is_strong = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if this chip is an ARM6 or an ARM7.  */
int arm_is_6_or_7 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
const char * arm_pic_register_string = NULL;
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output; this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset at the start of the
   next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, FLAGS, COSTS) \
  {#NAME, arm_none, FLAGS, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  { "armv2",    arm2,       FL_CO_PROC | FL_MODE26, NULL },
  { "armv2a",   arm2,       FL_CO_PROC | FL_MODE26, NULL },
  { "armv3",    arm6,       FL_CO_PROC | FL_MODE26 | FL_MODE32, NULL },
  { "armv3m",   arm7m,      FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M, NULL },
  { "armv4",    arm7tdmi,   FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_ARCH3M | FL_ARCH4, NULL },
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  { "armv4t",   arm7tdmi,   FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB, NULL },
  { "armv5",    arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5, NULL },
  { "armv5t",   arm10tdmi,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5, NULL },
  { "armv5te",  arm1026ejs, FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E, NULL },
  { "armv6",    arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6, NULL },
  { "armv6j",   arm1136js,  FL_CO_PROC | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E | FL_ARCH6, NULL },
  { "ep9312",   ep9312,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_LDSCHED | FL_CIRRUS, NULL },
  { "iwmmxt",   iwmmxt,     FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_ARCH5 | FL_ARCH5E | FL_XSCALE | FL_IWMMXT, NULL },
  { NULL, arm_none, 0, NULL }
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

struct arm_cpu_select arm_select[] =
{
  /* string	name		processors  */
  { NULL,	"-mcpu=",	all_cores  },
  { NULL,	"-march=",	all_architectures },
  { NULL,	"-mtune=",	all_cores }
};
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};


/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",	FPUTYPE_FPA},
  {"fpe2",	FPUTYPE_FPA_EMU2},
  {"fpe3",	FPUTYPE_FPA_EMU3},
  {"maverick",	FPUTYPE_MAVERICK},
  {"vfp",	FPUTYPE_VFP}
};


/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,		/* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,	/* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP		/* FPUTYPE_VFP  */
};


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"hard",	ARM_FLOAT_ABI_HARD}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
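/* For example, bit_count (0x2c) iterates 0x2c -> 0x28 -> 0x20 -> 0 and
   returns 3: each "value &= value - 1" step clears exactly one set bit,
   so the loop runs once per bit that is set.  */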
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		/* Determine the processor core for which we should
		   tune code-generation.  */
		if (/* -mcpu= is a sensible default.  */
		    i == 0
		    /* If -march= is used, and -mcpu= has not been used,
		       assume that we should tune for a representative
		       CPU from that architecture.  */
		    || i == 1
		    /* -mtune= overrides -mcpu= and -march=.  */
		    || i == 2)
		  arm_tune = (enum processor_type) (sel - ptr->processors);

		if (i != 2)
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning ("switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      static const struct cpu_default
      {
	const int cpu;
	const char *const name;
      }
      cpu_defaults[] =
      {
	{ TARGET_CPU_arm2,      "arm2" },
	{ TARGET_CPU_arm6,      "arm6" },
	{ TARGET_CPU_arm610,    "arm610" },
	{ TARGET_CPU_arm710,    "arm710" },
	{ TARGET_CPU_arm7m,     "arm7m" },
	{ TARGET_CPU_arm7500fe, "arm7500fe" },
	{ TARGET_CPU_arm7tdmi,  "arm7tdmi" },
	{ TARGET_CPU_arm8,      "arm8" },
	{ TARGET_CPU_arm810,    "arm810" },
	{ TARGET_CPU_arm9,      "arm9" },
	{ TARGET_CPU_strongarm, "strongarm" },
	{ TARGET_CPU_xscale,    "xscale" },
	{ TARGET_CPU_ep9312,    "ep9312" },
	{ TARGET_CPU_iwmmxt,    "iwmmxt" },
	{ TARGET_CPU_arm926ejs, "arm926ejs" },
	{ TARGET_CPU_arm1026ejs, "arm1026ejs" },
	{ TARGET_CPU_arm1136js, "arm1136js" },
	{ TARGET_CPU_arm1136jfs, "arm1136jfs" },
	{ TARGET_CPU_generic,   "arm" },
	{ 0, 0 }
      };
      const struct cpu_default * def;

      /* Find the default.  */
      for (def = cpu_defaults; def->name; def++)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;

      /* Make sure we found the default CPU.  */
      if (def->name == NULL)
	abort ();

      /* Find the default CPU's flags.  */
      for (sel = all_cores; sel->name != NULL; sel++)
	if (streq (def->name, sel->name))
	  break;

      if (sel->name == NULL)
	abort ();

      insn_flags = sel->flags;

      /* Now check to see if the user has specified any command line
	 switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* Force apcs-32 to be used for interworking.  */
	  target_flags |= ARM_FLAG_APCS_32;

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}
      else if (!TARGET_APCS_32)
	sought |= FL_MODE26;

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 Unfortunately this does not work with multilibbing.  We
		 need to be able to support multilibs for -mapcs-26 and for
		 -mthumb-interwork and there is no CPU that can support both
		 options.  Instead if we cannot find a cpu that has both the
		 characteristics of the default cpu and the given command line
		 options we scan the array again looking for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      if (best_fit == NULL)
		abort ();
	      else
		sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}

      if (arm_tune == arm_none)
	arm_tune = (enum processor_type) (sel - all_cores);
    }
  /* The processor for which we should tune should now have been
     chosen.  */
  if (arm_tune == arm_none)
    abort ();

  tune_flags = all_cores[(int)arm_tune].flags;
  targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
    {
      /* If APCS-32 was not the default then it must have been set by the
	 user, so issue a warning message.  If the user has specified
	 "-mapcs-32 -mcpu=arm2" then we lose here.  */
      if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
	warning ("target CPU does not support APCS-32");
      target_flags &= ~ARM_FLAG_APCS_32;
    }
  else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
    {
      warning ("target CPU does not support APCS-26");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support interworking");
      target_flags &= ~ARM_FLAG_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning ("target CPU does not support THUMB instructions");
      target_flags &= ~ARM_FLAG_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning ("ignoring -mapcs-frame because -mthumb was used");  */
      target_flags &= ~ARM_FLAG_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
      && TARGET_ARM)
    warning ("enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning ("enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning ("enabling caller interworking support is only meaningful when compiling for the Thumb");

  /* If interworking is enabled then APCS-32 must be selected as well.  */
  if (TARGET_INTERWORK)
    {
      if (!TARGET_APCS_32)
	warning ("interworking forces APCS-32 to be used");
      target_flags |= ARM_FLAG_APCS_32;
    }

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= ARM_FLAG_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= ARM_FLAG_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning ("APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
    warning ("-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning ("passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_is_strong = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
		    && !(tune_flags & FL_ARCH4))) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  if (TARGET_IWMMXT && (! TARGET_ATPCS))
    target_flags |= ARM_FLAG_ATPCS;
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
	target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
	target_fpu_name = "fpe3";
      else
	error ("invalid floating point emulation option: -mfpe=%s",
	       target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
	{
	  if (streq (all_fpus[i].name, target_fpu_name))
	    {
	      arm_fpu_arch = all_fpus[i].fpu;
	      arm_fpu_tune = arm_fpu_arch;
	      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
	      break;
	    }
	}
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      if ((insn_flags & FL_VFPV2) != 0)
	arm_fpu_arch = FPUTYPE_VFP;
      else if (insn_flags & FL_CIRRUS)
	arm_fpu_arch = FPUTYPE_MAVERICK;
      else
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
	arm_fpu_tune = FPUTYPE_FPA;
      else
	arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	abort ();
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
	{
	  if (streq (all_float_abis[i].name, target_float_abi_name))
	    {
	      arm_float_abi = all_float_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (all_float_abis))
	error ("invalid floating point abi: -mfloat-abi=%s",
	       target_float_abi_name);
    }
  else
    {
      /* Use the soft-float target flag.  */
      if (target_flags & ARM_FLAG_SOFT_FLOAT)
	arm_float_abi = ARM_FLOAT_ABI_SOFT;
      else
	arm_float_abi = ARM_FLOAT_ABI_HARD;
    }

  if (arm_float_abi == ARM_FLOAT_ABI_SOFTFP)
    sorry ("-mfloat-abi=softfp");
  /* If soft-float is specified then don't use the FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32)
	arm_structure_size_boundary = size;
      else
	warning ("structure size boundary can only be set to 8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning ("-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
	 experiments seem to show that in pathological cases a setting of
	 1 degrades less severely than a setting of 2.  This could change if
	 other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (tune_flags & FL_LDSCHED)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_is_strong)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const  arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
    abort ();

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && current_function_nothrow
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (current_function_needs_context)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  if (cfun->machine->eh_epilogue_sp_ofs != NULL_RTX)
    type |= ARM_FT_EXCEPTION_HANDLER;
  else
    {
      a = lookup_attribute ("isr", attr);
      if (a == NULL_TREE)
	a = lookup_attribute ("interrupt", attr);

      if (a == NULL_TREE)
	type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
      else
	type |= arm_isr_value (TREE_VALUE (a));
    }

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  stack_adjust = arm_get_frame_size () + current_function_outgoing_args_size;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ().  */
      || ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
      /* Or if the function calls alloca.  */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ...  */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value (always true
	 until we implement return-in-regs), or for a tail-call
	 argument ...  */
      if (sibling)
	{
	  if (GET_CODE (sibling) != CALL_INSN)
	    abort ();

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_is_strong)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}
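/* When this returns 1 the epilogue collapses to a single instruction:
   typically "mov pc, lr" when no registers were saved, or a single
   load-multiple that restores the saved registers straight into pc.  */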
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  /* Fast return for 0 and powers of 2.  */
  if ((i & (i - 1)) == 0)
    return TRUE;

  do
    {
      if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
	return TRUE;
      mask =
	  (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
			 >> (32 - 2)) | ~(unsigned HOST_WIDE_INT) 0xffffffff;
    }
  while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);

  return FALSE;
}
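/* The loop above tries each of the sixteen even rotations of an 8-bit
   window, since an ARM data-processing immediate is an 8-bit value
   rotated right by an even amount.  Thus 0xff, 0xff00 and 0xff000000
   are all valid immediates, while 0x101 and 0xff0000ff are not.  */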
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb.  */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      abort ();
    }
}
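/* For PLUS, the negated constant may be encodable even when the original
   is not: adding 0xffffff00 (i.e. -0x100) can be done by subtracting
   0x100 instead.  Similarly for AND, the inverted constant corresponds
   to a bic instruction.  */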
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */
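/* For example, (set target 0xe0000100) cannot be done in one mov, so
   this will emit something like:

	mov	target, #0xe0000000
	add	target, target, #0x100

   and return 2.  */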
int
arm_split_constant (enum rtx_code code, enum machine_mode mode,
		    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesize
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
	  && (arm_gen_constant (code, mode, val, target, source, 1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE, all
		 the rest are diadic.  */
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
	      return 1;
	    }
	  else
	    {
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_MINUS (mode, temp, source)));
	      else
		emit_insn (gen_rtx_SET (VOIDmode, target,
					gen_rtx_fmt_ee (code, mode, source,
							temp)));
	      return 2;
	    }
	}
    }

  return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
}
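/* Count how many insns the windowing loop at the bottom of
   arm_gen_constant (below) would use to emit REMAINDER, scanning from
   bit I downwards, without generating any RTL.  */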
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;

  do
    {
      int end;

      if (i <= 0)
	i += 32;
      if (remainder & (3 << (i - 2)))
	{
	  end = i - 8;
	  if (end < 0)
	    end += 32;
	  temp1 = remainder & ((0x0ff << end)
			       | ((i < end) ? (0xff >> (32 - end)) : 0));
	  remainder &= ~temp1;
	  num_insns++;
	  i -= 6;
	}
      i -= 2;
    }
  while (remainder);

  return num_insns;
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */
static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode,
		  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
		  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    GEN_INT (ARM_SIGN_EXTEND (val))));
	  return 1;
	}
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      break;

    case AND:
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, const0_rtx));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
	{
	  if (reload_completed && rtx_equal_p (target, source))
	    return 0;
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target, source));
	  return 1;
	}
      if (remainder == 0xffffffff)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NOT (mode, source)));
	  return 1;
	}

      /* We don't know how to handle any other XOR constants below.  */
      abort ();

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
	 passed as (source + (-val)).  */
      if (remainder == 0)
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_NEG (mode, source)));
	  return 1;
	}
      if (const_ok_for_arm (val))
	{
	  if (generate)
	    emit_insn (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_MINUS (mode, GEN_INT (val),
						   source)));
	  return 1;
	}
      can_negate = 1;

      break;

    default:
      abort ();
    }
  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
	emit_insn (gen_rtx_SET (VOIDmode, target,
				(source ? gen_rtx_fmt_ee (code, mode, source,
							  GEN_INT (val))
				 : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
	clear_sign_bit_copies++;
      else
	break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
	set_sign_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
	clear_zero_bit_copies++;
      else
	break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
	set_zero_bit_copies++;
      else
	break;
    }
  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
	 to be negative.  This is a good way of doing it, since the shift
	 may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
	{
	  if (const_ok_for_arm
	      (temp1 = ARM_SIGN_EXTEND (remainder
					<< (set_sign_bit_copies - 1))))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	  /* For an inverted constant, we will need to set the low bits,
	     these will be shifted out of harm's way.  */
	  temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
	  if (const_ok_for_arm (~temp1))
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  emit_insn (gen_rtx_SET (VOIDmode, new_src,
					  GEN_INT (temp1)));
		  emit_insn (gen_ashrsi3 (target, new_src,
					  GEN_INT (set_sign_bit_copies - 1)));
		}
	      return 2;
	    }
	}

      /* See if we can generate this by setting the bottom (or the top)
	 16 bits, and then shifting these into the other half of the
	 word.  We only look for the simplest cases, to do more would cost
	 too much.  Be careful, however, not to generate this when the
	 alternative would take fewer insns.  */
      if (val & 0xffff0000)
	{
	  temp1 = remainder & 0xffff0000;
	  temp2 = remainder & 0x0000ffff;

	  /* Overlaps outside this range are best done using other methods.  */
	  for (i = 9; i < 24; i++)
	    {
	      if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
		  && !const_ok_for_arm (temp2))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp2, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn (gen_rtx_SET
			       (VOIDmode, target,
				gen_rtx_IOR (mode,
					     gen_rtx_ASHIFT (mode, source,
							     GEN_INT (i)),
					     source)));
		  return insns + 1;
		}
	    }

	  /* Don't duplicate cases already considered.  */
	  for (i = 17; i < 24; i++)
	    {
	      if (((temp1 | (temp1 >> i)) == remainder)
		  && !const_ok_for_arm (temp1))
		{
		  rtx new_src = (subtargets
				 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
				 : target);
		  insns = arm_gen_constant (code, mode, temp1, new_src,
					    source, subtargets, generate);
		  source = new_src;
		  if (generate)
		    emit_insn
		      (gen_rtx_SET (VOIDmode, target,
				    gen_rtx_IOR
				    (mode,
				     gen_rtx_LSHIFTRT (mode, source,
						       GEN_INT (i)),
				     source)));
		  return insns + 1;
		}
	    }
	}
      break;
    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
	 single instruction, and we can find a temporary to put it in,
	 then this can be done in two instructions instead of 3-4.  */
      if (subtargets
	  /* TARGET can't be NULL if SUBTARGETS is 0.  */
	  || (reload_completed && !reg_mentioned_p (target, source)))
	{
	  if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
	    {
	      if (generate)
		{
		  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

		  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
		  emit_insn (gen_rtx_SET (VOIDmode, target,
					  gen_rtx_fmt_ee (code, mode,
							  source, sub)));
		}
	      return 2;
	    }
	}

      if (code == XOR)
	break;

      if (set_sign_bit_copies > 8
	  && (val & (-1 << (32 - set_sign_bit_copies))) == val)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_sign_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode,
								   source,
								   shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode, sub,
								     shift))));
	    }
	  return 2;
	}

      if (set_zero_bit_copies > 8
	  && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (set_zero_bit_copies);

	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode,
						   gen_rtx_LSHIFTRT (mode,
								     source,
								     shift))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode,
						   gen_rtx_ASHIFT (mode, sub,
								   shift))));
	    }
	  return 2;
	}

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
	{
	  if (generate)
	    {
	      rtx sub = subtargets ? gen_reg_rtx (mode) : target;
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_NOT (mode, source)));
	      source = sub;
	      if (subtargets)
		sub = gen_reg_rtx (mode);
	      emit_insn (gen_rtx_SET (VOIDmode, sub,
				      gen_rtx_AND (mode, source,
						   GEN_INT (temp1))));
	      emit_insn (gen_rtx_SET (VOIDmode, target,
				      gen_rtx_NOT (mode, sub)));
	    }
	  return 3;
	}
      break;
    case AND:
      /* See if two shifts will do 2 or more insn's worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = ((0xffffffff
				       << (32 - clear_sign_bit_copies))
				      & 0xffffffff);

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;
		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_sign_bit_copies);

	      emit_insn (gen_ashlsi3 (new_src, source, shift));
	      emit_insn (gen_lshrsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
	{
	  HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

	  if ((remainder | shift_mask) != 0xffffffff)
	    {
	      if (generate)
		{
		  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    new_src, source, subtargets, 1);
		  source = new_src;
		}
	      else
		{
		  rtx targ = subtargets ? NULL_RTX : target;

		  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
					    targ, source, subtargets, 0);
		}
	    }

	  if (generate)
	    {
	      rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
	      rtx shift = GEN_INT (clear_zero_bit_copies);

	      emit_insn (gen_lshrsi3 (new_src, source, shift));
	      emit_insn (gen_ashlsi3 (target, new_src, shift));
	    }

	  return insns + 2;
	}

      break;

    default:
      break;
    }
  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary, we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
	int consecutive_zeros = 0;

	if (!(remainder & (3 << i)))
	  {
	    while ((i < 32) && !(remainder & (3 << i)))
	      {
		consecutive_zeros += 2;
		i += 2;
	      }
	    if (consecutive_zeros > best_consecutive_zeros)
	      {
		best_consecutive_zeros = consecutive_zeros;
		best_start = i - consecutive_zeros;
	      }
	    i -= 2;
	  }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

	       *((volatile int *)0xe0000100) = 1;
	       *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

		mov rA, #0xe0000000
		mov rB, #1
		str rB, [rA, #0x100]
		mov rB, #2
		str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
	&& ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
	&& (count_insns_for_constant (remainder, 0) <=
	    count_insns_for_constant (remainder, best_start)))
      best_start = 0;

    /* Now start emitting the insns.  */
    i = best_start;
    do
      {
	int end;

	if (i <= 0)
	  i += 32;
	if (remainder & (3 << (i - 2)))
	  {
	    end = i - 8;
	    if (end < 0)
	      end += 32;
	    temp1 = remainder & ((0x0ff << end)
				 | ((i < end) ? (0xff >> (32 - end)) : 0));
	    remainder &= ~temp1;

	    if (generate)
	      {
		rtx new_src, temp1_rtx;

		if (code == SET || code == MINUS)
		  {
		    new_src = (subtargets ? gen_reg_rtx (mode) : target);
		    if (can_invert && code != MINUS)
		      temp1 = ~temp1;
		  }
		else
		  {
		    if (remainder && subtargets)
		      new_src = gen_reg_rtx (mode);
		    else
		      new_src = target;
		    if (can_invert)
		      temp1 = ~temp1;
		    else if (can_negate)
		      temp1 = -temp1;
		  }

		temp1 = trunc_int_for_mode (temp1, mode);
		temp1_rtx = GEN_INT (temp1);

		if (code == SET)
		  ;
		else if (code == MINUS)
		  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
		else
		  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

		emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
		source = new_src;
	      }

	    if (code == SET)
	      {
		can_invert = 0;
		code = PLUS;
	      }
	    else if (code == MINUS)
	      code = PLUS;

	    insns++;
	    i -= 6;
	  }
	i -= 2;
      }
    while (remainder);
  }

  return insns;
}
1945 /* Canonicalize a comparison so that we are more likely to recognize it.
1946 This can be done for a few constant compares, where we can make the
1947 immediate value easier to load. */
1949 enum rtx_code
1950 arm_canonicalize_comparison (enum rtx_code code, rtx * op1)
1952 unsigned HOST_WIDE_INT i = INTVAL (*op1);
1954 switch (code)
1956 case EQ:
1957 case NE:
1958 return code;
1960 case GT:
1961 case LE:
1962 if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
1963 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1965 *op1 = GEN_INT (i + 1);
1966 return code == GT ? GE : LT;
1968 break;
1970 case GE:
1971 case LT:
1972 if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
1973 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1975 *op1 = GEN_INT (i - 1);
1976 return code == GE ? GT : LE;
1978 break;
1980 case GTU:
1981 case LEU:
1982 if (i != ~((unsigned HOST_WIDE_INT) 0)
1983 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
1985 *op1 = GEN_INT (i + 1);
1986 return code == GTU ? GEU : LTU;
1988 break;
1990 case GEU:
1991 case LTU:
1992 if (i != 0
1993 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
1995 *op1 = GEN_INT (i - 1);
1996 return code == GEU ? GTU : LEU;
1998 break;
2000 default:
2001 abort ();
2004 return code;
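/* Example (added for exposition): `x > 0xfff' cannot be tested directly
   because 0xfff is not a valid ARM immediate, but 0x1000 is; GT is
   therefore rewritten as `x >= 0x1000' (GE), and the comparison then
   needs no constant-pool load.  */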
2007 /* Decide whether a type should be returned in memory (true)
2008 or in a register (false). This is called by the macro
2009 RETURN_IN_MEMORY. */
2011 arm_return_in_memory (tree type)
2013 HOST_WIDE_INT size;
2015 if (!AGGREGATE_TYPE_P (type))
2016 /* All simple types are returned in registers. */
2017 return 0;
2019 size = int_size_in_bytes (type);
2021 if (TARGET_ATPCS)
2023 /* ATPCS returns aggregate types in memory only if they are
2024 larger than a word (or are variable size). */
2025 return (size < 0 || size > UNITS_PER_WORD);
2028 /* For the arm-wince targets we choose to be compatible with Microsoft's
2029 ARM and Thumb compilers, which always return aggregates in memory. */
2030 #ifndef ARM_WINCE
2031 /* All structures/unions bigger than one word are returned in memory.
2032 Also catch the case where int_size_in_bytes returns -1. In this case
2033 the aggregate is either huge or of variable size, and in either case
2034 we will want to return it via memory and not in a register. */
2035 if (size < 0 || size > UNITS_PER_WORD)
2036 return 1;
2038 if (TREE_CODE (type) == RECORD_TYPE)
2040 tree field;
2042 /* For a struct the APCS says that we only return in a register
2043 if the type is 'integer like' and every addressable element
2044 has an offset of zero. For practical purposes this means
2045 that the structure can have at most one non bit-field element
2046 and that this element must be the first one in the structure. */
2048 /* Find the first field, ignoring non FIELD_DECL things which will
2049 have been created by C++. */
2050 for (field = TYPE_FIELDS (type);
2051 field && TREE_CODE (field) != FIELD_DECL;
2052 field = TREE_CHAIN (field))
2053 continue;
2055 if (field == NULL)
2056 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2058 /* Check that the first field is valid for returning in a register. */
2060 /* ... Floats are not allowed */
2061 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2062 return 1;
2064 /* ... Aggregates that are not themselves valid for returning in
2065 a register are not allowed. */
2066 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2067 return 1;
2069 /* Now check the remaining fields, if any. Only bitfields are allowed,
2070 since they are not addressable. */
2071 for (field = TREE_CHAIN (field);
2072 field;
2073 field = TREE_CHAIN (field))
2075 if (TREE_CODE (field) != FIELD_DECL)
2076 continue;
2078 if (!DECL_BIT_FIELD_TYPE (field))
2079 return 1;
2082 return 0;
2085 if (TREE_CODE (type) == UNION_TYPE)
2087 tree field;
2089 /* Unions can be returned in registers if every element is
2090 integral, or can be returned in an integer register. */
2091 for (field = TYPE_FIELDS (type);
2092 field;
2093 field = TREE_CHAIN (field))
2095 if (TREE_CODE (field) != FIELD_DECL)
2096 continue;
2098 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2099 return 1;
2101 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2102 return 1;
2105 return 0;
2107 #endif /* not ARM_WINCE */
2109 /* Return all other types in memory. */
2110 return 1;
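/* Examples (added for exposition): under the APCS, struct { int x; }
   is `integer like' and comes back in r0; struct { int x, y; } exceeds
   UNITS_PER_WORD and is returned via a hidden pointer to memory; and
   struct { float f; } also goes to memory because its first field is
   a float.  */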
2113 /* Indicate whether or not words of a double are in big-endian order. */
2116 arm_float_words_big_endian (void)
2118 if (TARGET_MAVERICK)
2119 return 0;
2121 /* For FPA, float words are always big-endian. For VFP, float words
2122 follow the memory system mode. */
2124 if (TARGET_FPA)
2126 return 1;
2129 if (TARGET_VFP)
2130 return (TARGET_BIG_END ? 1 : 0);
2132 return 1;
2135 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2136 for a call to a function whose data type is FNTYPE.
2137 For a library call, FNTYPE is NULL. */
2138 void
2139 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2140 rtx libname ATTRIBUTE_UNUSED,
2141 tree fndecl ATTRIBUTE_UNUSED)
2143 /* On the ARM, the offset starts at 0. */
2144 pcum->nregs = ((fntype && aggregate_value_p (TREE_TYPE (fntype), fntype)) ? 1 : 0);
2145 pcum->iwmmxt_nregs = 0;
2147 pcum->call_cookie = CALL_NORMAL;
2149 if (TARGET_LONG_CALLS)
2150 pcum->call_cookie = CALL_LONG;
2152 /* Check for long call/short call attributes. The attributes
2153 override any command line option. */
2154 if (fntype)
2156 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2157 pcum->call_cookie = CALL_SHORT;
2158 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2159 pcum->call_cookie = CALL_LONG;
2162 /* Varargs vectors are treated the same as long long.
2163 named_count avoids having to change the way arm handles 'named'. */
2164 pcum->named_count = 0;
2165 pcum->nargs = 0;
2167 if (TARGET_REALLY_IWMMXT && fntype)
2169 tree fn_arg;
2171 for (fn_arg = TYPE_ARG_TYPES (fntype);
2172 fn_arg;
2173 fn_arg = TREE_CHAIN (fn_arg))
2174 pcum->named_count += 1;
2176 if (! pcum->named_count)
2177 pcum->named_count = INT_MAX;
2181 /* Determine where to put an argument to a function.
2182 Value is zero to push the argument on the stack,
2183 or a hard register in which to store the argument.
2185 MODE is the argument's machine mode.
2186 TYPE is the data type of the argument (as a tree).
2187 This is null for libcalls where that information may
2188 not be available.
2189 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2190 the preceding args and about the function being called.
2191 NAMED is nonzero if this argument is a named parameter
2192 (otherwise it is an extra parameter matching an ellipsis). */
2195 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2196 tree type ATTRIBUTE_UNUSED, int named)
2198 if (TARGET_REALLY_IWMMXT)
2200 if (VECTOR_MODE_SUPPORTED_P (mode))
2202 /* Varargs vectors are treated the same as long long.
2203 named_count avoids having to change the way arm handles 'named'. */
2204 if (pcum->named_count <= pcum->nargs + 1)
2206 if (pcum->nregs == 1)
2207 pcum->nregs += 1;
2208 if (pcum->nregs <= 2)
2209 return gen_rtx_REG (mode, pcum->nregs);
2210 else
2211 return NULL_RTX;
2213 else if (pcum->iwmmxt_nregs <= 9)
2214 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2215 else
2216 return NULL_RTX;
2218 else if ((mode == DImode || mode == DFmode) && pcum->nregs & 1)
2219 pcum->nregs += 1;
2222 if (mode == VOIDmode)
2223 /* Compute operand 2 of the call insn. */
2224 return GEN_INT (pcum->call_cookie);
2226 if (!named || pcum->nregs >= NUM_ARG_REGS)
2227 return NULL_RTX;
2229 return gen_rtx_REG (mode, pcum->nregs);
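/* Example (added for exposition): for f (int, long long, int) the
   first int lands in r0, the DImode argument is aligned to an even
   register pair (r2/r3, leaving r1 unused), and the trailing int no
   longer fits in r0-r3, so it is passed on the stack.  */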
2232 /* Variable sized types are passed by reference. This is a GCC
2233 extension to the ARM ABI. */
2236 arm_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2237 enum machine_mode mode ATTRIBUTE_UNUSED,
2238 tree type, int named ATTRIBUTE_UNUSED)
2240 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2243 /* Implement va_arg. */
2246 arm_va_arg (tree valist, tree type)
2248 /* Variable sized types are passed by reference. */
2249 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
2251 rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type));
2252 return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr));
2255 if (FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), NULL) == IWMMXT_ALIGNMENT)
2257 tree minus_eight;
2258 tree t;
2260 /* Maintain 64-bit alignment of the valist pointer by
2261 constructing: valist = ((valist + (8 - 1)) & -8). */
2262 minus_eight = build_int_2 (- (IWMMXT_ALIGNMENT / BITS_PER_UNIT), -1);
2263 t = build_int_2 ((IWMMXT_ALIGNMENT / BITS_PER_UNIT) - 1, 0);
2264 t = build (PLUS_EXPR, TREE_TYPE (valist), valist, t);
2265 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, minus_eight);
2266 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
2267 TREE_SIDE_EFFECTS (t) = 1;
2268 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2270 /* This is to stop the combine pass optimizing
2271 away the alignment adjustment. */
2272 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
2275 return std_expand_builtin_va_arg (valist, type);
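/* For instance (added for exposition), the alignment expression above
   rounds a valist of 0x1004 up to 0x1008: (0x1004 + 7) & -8 == 0x1008.  */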
2278 /* Encode the current state of the #pragma [no_]long_calls. */
2279 typedef enum
2281 OFF, /* No #pragma [no_]long_calls is in effect. */
2282 LONG, /* #pragma long_calls is in effect. */
2283 SHORT /* #pragma no_long_calls is in effect. */
2284 } arm_pragma_enum;
2286 static arm_pragma_enum arm_pragma_long_calls = OFF;
2288 void
2289 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2291 arm_pragma_long_calls = LONG;
2294 void
2295 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2297 arm_pragma_long_calls = SHORT;
2300 void
2301 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2303 arm_pragma_long_calls = OFF;
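/* Usage sketch (added for exposition):

       #pragma long_calls
       extern void far_away (void);    -- calls become indirect long calls
       #pragma no_long_calls
       extern void nearby (void);      -- calls use a plain BL
       #pragma long_calls_off          -- back to the command-line default
*/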
2306 /* Table of machine attributes. */
2307 const struct attribute_spec arm_attribute_table[] =
2309 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2310 /* Function calls made to this symbol must be done indirectly, because
2311 it may lie outside of the 26 bit addressing range of a normal function
2312 call. */
2313 { "long_call", 0, 0, false, true, true, NULL },
2314 /* Whereas these functions are always known to reside within the 26 bit
2315 addressing range. */
2316 { "short_call", 0, 0, false, true, true, NULL },
2317 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2318 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2319 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2320 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2321 #ifdef ARM_PE
2322 /* ARM/PE has three new attributes:
2323 interfacearm - ?
2324 dllexport - for exporting a function/variable that will live in a dll
2325 dllimport - for importing a function/variable from a dll
2327 Microsoft allows multiple declspecs in one __declspec, separating
2328 them with spaces. We do NOT support this. Instead, use __declspec
2329 multiple times.
2331 { "dllimport", 0, 0, true, false, false, NULL },
2332 { "dllexport", 0, 0, true, false, false, NULL },
2333 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2334 #endif
2335 { NULL, 0, 0, false, false, false, NULL }
2338 /* Handle an attribute requiring a FUNCTION_DECL;
2339 arguments as in struct attribute_spec.handler. */
2340 static tree
2341 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2342 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2344 if (TREE_CODE (*node) != FUNCTION_DECL)
2346 warning ("`%s' attribute only applies to functions",
2347 IDENTIFIER_POINTER (name));
2348 *no_add_attrs = true;
2351 return NULL_TREE;
2354 /* Handle an "interrupt" or "isr" attribute;
2355 arguments as in struct attribute_spec.handler. */
2356 static tree
2357 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
2358 bool *no_add_attrs)
2360 if (DECL_P (*node))
2362 if (TREE_CODE (*node) != FUNCTION_DECL)
2364 warning ("`%s' attribute only applies to functions",
2365 IDENTIFIER_POINTER (name));
2366 *no_add_attrs = true;
2368 /* FIXME: the argument, if any, is checked for type attributes;
2369 should it be checked for decl ones? */
2371 else
2373 if (TREE_CODE (*node) == FUNCTION_TYPE
2374 || TREE_CODE (*node) == METHOD_TYPE)
2376 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
2378 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2379 *no_add_attrs = true;
2382 else if (TREE_CODE (*node) == POINTER_TYPE
2383 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2384 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2385 && arm_isr_value (args) != ARM_FT_UNKNOWN)
2387 *node = build_type_copy (*node);
2388 TREE_TYPE (*node) = build_type_attribute_variant
2389 (TREE_TYPE (*node),
2390 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2391 *no_add_attrs = true;
2393 else
2395 /* Possibly pass this attribute on from the type to a decl. */
2396 if (flags & ((int) ATTR_FLAG_DECL_NEXT
2397 | (int) ATTR_FLAG_FUNCTION_NEXT
2398 | (int) ATTR_FLAG_ARRAY_NEXT))
2400 *no_add_attrs = true;
2401 return tree_cons (name, args, NULL_TREE);
2403 else
2405 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
2410 return NULL_TREE;
2413 /* Return 0 if the attributes for two types are incompatible, 1 if they
2414 are compatible, and 2 if they are nearly compatible (which causes a
2415 warning to be generated). */
2416 static int
2417 arm_comp_type_attributes (tree type1, tree type2)
2419 int l1, l2, s1, s2;
2421 /* Check for mismatch of non-default calling convention. */
2422 if (TREE_CODE (type1) != FUNCTION_TYPE)
2423 return 1;
2425 /* Check for mismatched call attributes. */
2426 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
2427 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
2428 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
2429 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
2431 /* Only bother to check if an attribute is defined. */
2432 if (l1 | l2 | s1 | s2)
2434 /* If one type has an attribute, the other must have the same attribute. */
2435 if ((l1 != l2) || (s1 != s2))
2436 return 0;
2438 /* Disallow mixed attributes. */
2439 if ((l1 & s2) || (l2 & s1))
2440 return 0;
2443 /* Check for mismatched ISR attribute. */
2444 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2445 if (! l1)
2446 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2447 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2448 if (! l2)
2449 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2450 if (l1 != l2)
2451 return 0;
2453 return 1;
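/* Example (added for exposition): assigning a plain function's address
   to a pointer whose type carries __attribute__ ((long_call)) yields 0
   (incompatible), since only one side has the attribute.  */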
2456 /* Encode long_call or short_call attribute by prefixing
2457 symbol name in DECL with a special character FLAG. */
2458 void
2459 arm_encode_call_attribute (tree decl, int flag)
2461 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2462 int len = strlen (str);
2463 char * newstr;
2465 /* Do not allow weak functions to be treated as short call. */
2466 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
2467 return;
2469 newstr = alloca (len + 2);
2470 newstr[0] = flag;
2471 strcpy (newstr + 1, str);
2473 newstr = (char *) ggc_alloc_string (newstr, len + 1);
2474 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
2477 /* Assigns default attributes to a newly defined type. This is used to
2478 set short_call/long_call attributes for function types of
2479 functions defined inside corresponding #pragma scopes. */
2480 static void
2481 arm_set_default_type_attributes (tree type)
2483 /* Add __attribute__ ((long_call)) to all functions when inside
2484 #pragma long_calls, or __attribute__ ((short_call)) when inside
2485 #pragma no_long_calls. */
2486 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
2488 tree type_attr_list, attr_name;
2489 type_attr_list = TYPE_ATTRIBUTES (type);
2491 if (arm_pragma_long_calls == LONG)
2492 attr_name = get_identifier ("long_call");
2493 else if (arm_pragma_long_calls == SHORT)
2494 attr_name = get_identifier ("short_call");
2495 else
2496 return;
2498 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
2499 TYPE_ATTRIBUTES (type) = type_attr_list;
2503 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2504 defined within the current compilation unit. If this cannot be
2505 determined, then 0 is returned. */
2506 static int
2507 current_file_function_operand (rtx sym_ref)
2509 /* This is a bit of a fib. A function will have a short call flag
2510 applied to its name if it has the short call attribute, or if it
2511 has already been defined within the current compilation unit. */
2512 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
2513 return 1;
2515 /* The current function is always defined within the current compilation
2516 unit. If it is a weak definition, however, then this may not be the real
2517 definition of the function, and so we have to say no. */
2518 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
2519 && !DECL_WEAK (current_function_decl))
2520 return 1;
2522 /* We cannot make the determination - default to returning 0. */
2523 return 0;
2526 /* Return nonzero if a 32 bit "long_call" should be generated for
2527 this call. We generate a long_call if the function:
2529 a. has an __attribute__ ((long_call))
2530 or b. is within the scope of a #pragma long_calls
2531 or c. the -mlong-calls command line switch has been specified
2533 However we do not generate a long call if the function:
2535 d. has an __attribute__ ((short_call))
2536 or e. is inside the scope of a #pragma no_long_calls
2537 or f. has an __attribute__ ((section))
2538 or g. is defined within the current compilation unit.
2540 This function will be called by C fragments contained in the machine
2541 description file. CALL_REF and CALL_COOKIE correspond to the matched
2542 rtl operands. CALL_SYMBOL is used to distinguish between
2543 two different callers of the function. It is set to 1 in the
2544 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
2545 and "call_value" patterns. This is because of the difference in the
2546 SYM_REFs passed by these patterns. */
2548 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
2550 if (!call_symbol)
2552 if (GET_CODE (sym_ref) != MEM)
2553 return 0;
2555 sym_ref = XEXP (sym_ref, 0);
2558 if (GET_CODE (sym_ref) != SYMBOL_REF)
2559 return 0;
2561 if (call_cookie & CALL_SHORT)
2562 return 0;
2564 if (TARGET_LONG_CALLS && flag_function_sections)
2565 return 1;
2567 if (current_file_function_operand (sym_ref))
2568 return 0;
2570 return (call_cookie & CALL_LONG)
2571 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
2572 || TARGET_LONG_CALLS;
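/* Example (added for exposition): with -mlong-calls, a call to an
   as-yet-undefined extern function is made by loading its address into
   a register and branching through it, while a call to a function
   already emitted in this unit stays a plain BL, since the branch is
   known to be in range.  */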
2575 /* Return nonzero if it is ok to make a tail-call to DECL. */
2576 static bool
2577 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
2579 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
2581 if (cfun->machine->sibcall_blocked)
2582 return false;
2584 /* Never tailcall something for which we have no decl, or if we
2585 are in Thumb mode. */
2586 if (decl == NULL || TARGET_THUMB)
2587 return false;
2589 /* Get the calling method. */
2590 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2591 call_type = CALL_SHORT;
2592 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
2593 call_type = CALL_LONG;
2595 /* Cannot tail-call to long calls, since these are out of range of
2596 a branch instruction. However, if not compiling PIC, we know
2597 we can reach the symbol if it is in this compilation unit. */
2598 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
2599 return false;
2601 /* If we are interworking and the function is not declared static
2602 then we can't tail-call it unless we know that it exists in this
2603 compilation unit (since it might be a Thumb routine). */
2604 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
2605 return false;
2607 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
2608 if (IS_INTERRUPT (arm_current_func_type ()))
2609 return false;
2611 /* Everything else is ok. */
2612 return true;
2616 /* Addressing mode support functions. */
2618 /* Return nonzero if X is a legitimate immediate operand when compiling
2619 for PIC. */
2621 legitimate_pic_operand_p (rtx x)
2623 if (CONSTANT_P (x)
2624 && flag_pic
2625 && (GET_CODE (x) == SYMBOL_REF
2626 || (GET_CODE (x) == CONST
2627 && GET_CODE (XEXP (x, 0)) == PLUS
2628 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
2629 return 0;
2631 return 1;
2635 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
2637 if (GET_CODE (orig) == SYMBOL_REF
2638 || GET_CODE (orig) == LABEL_REF)
2640 #ifndef AOF_ASSEMBLER
2641 rtx pic_ref, address;
2642 #endif
2643 rtx insn;
2644 int subregs = 0;
2646 if (reg == 0)
2648 if (no_new_pseudos)
2649 abort ();
2650 else
2651 reg = gen_reg_rtx (Pmode);
2653 subregs = 1;
2656 #ifdef AOF_ASSEMBLER
2657 /* The AOF assembler can generate relocations for these directly, and
2658 understands that the PIC register has to be added into the offset. */
2659 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
2660 #else
2661 if (subregs)
2662 address = gen_reg_rtx (Pmode);
2663 else
2664 address = reg;
2666 if (TARGET_ARM)
2667 emit_insn (gen_pic_load_addr_arm (address, orig));
2668 else
2669 emit_insn (gen_pic_load_addr_thumb (address, orig));
2671 if ((GET_CODE (orig) == LABEL_REF
2672 || (GET_CODE (orig) == SYMBOL_REF &&
2673 SYMBOL_REF_LOCAL_P (orig)))
2674 && NEED_GOT_RELOC)
2675 pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
2676 else
2678 pic_ref = gen_rtx_MEM (Pmode,
2679 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
2680 address));
2681 RTX_UNCHANGING_P (pic_ref) = 1;
2684 insn = emit_move_insn (reg, pic_ref);
2685 #endif
2686 current_function_uses_pic_offset_table = 1;
2687 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2688 by the loop optimizer. */
2689 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
2690 REG_NOTES (insn));
2691 return reg;
2693 else if (GET_CODE (orig) == CONST)
2695 rtx base, offset;
2697 if (GET_CODE (XEXP (orig, 0)) == PLUS
2698 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
2699 return orig;
2701 if (reg == 0)
2703 if (no_new_pseudos)
2704 abort ();
2705 else
2706 reg = gen_reg_rtx (Pmode);
2709 if (GET_CODE (XEXP (orig, 0)) == PLUS)
2711 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2712 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2713 base == reg ? 0 : reg);
2715 else
2716 abort ();
2718 if (GET_CODE (offset) == CONST_INT)
2720 /* The base register doesn't really matter; we only want to
2721 test the index for the appropriate mode. */
2722 if (!arm_legitimate_index_p (mode, offset, SET, 0))
2724 if (!no_new_pseudos)
2725 offset = force_reg (Pmode, offset);
2726 else
2727 abort ();
2730 if (GET_CODE (offset) == CONST_INT)
2731 return plus_constant (base, INTVAL (offset));
2734 if (GET_MODE_SIZE (mode) > 4
2735 && (GET_MODE_CLASS (mode) == MODE_INT
2736 || TARGET_SOFT_FLOAT))
2738 emit_insn (gen_addsi3 (reg, base, offset));
2739 return reg;
2742 return gen_rtx_PLUS (Pmode, base, offset);
2745 return orig;
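/* Sketch (added for exposition) of the non-AOF SYMBOL_REF case above,
   for a global `x' (not SYMBOL_REF_LOCAL_P), roughly:

       ldr     rA, .LCx          @ GOT offset of x, from the minipool
       ldr     rA, [rPIC, rA]    @ load &x from the GOT

   with a REG_EQUAL note naming `x' attached to the final move.  */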
2748 /* Generate code to load the PIC register. PROLOGUE is true if
2749 called from arm_expand_prologue (in which case we want the
2750 generated insns at the start of the function); false if called
2751 by an exception receiver that needs the PIC register reloaded
2752 (in which case the insns are just dumped at the current location). */
2753 void
2754 arm_finalize_pic (int prologue ATTRIBUTE_UNUSED)
2756 #ifndef AOF_ASSEMBLER
2757 rtx l1, pic_tmp, pic_tmp2, seq, pic_rtx;
2758 rtx global_offset_table;
2760 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
2761 return;
2763 if (!flag_pic)
2764 abort ();
2766 start_sequence ();
2767 l1 = gen_label_rtx ();
2769 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2770 /* On the ARM the PC register contains 'dot + 8' at the time of the
2771 addition, on the Thumb it is 'dot + 4'. */
2772 pic_tmp = plus_constant (gen_rtx_LABEL_REF (Pmode, l1), TARGET_ARM ? 8 : 4);
2773 if (GOT_PCREL)
2774 pic_tmp2 = gen_rtx_CONST (VOIDmode,
2775 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
2776 else
2777 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
2779 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
2781 if (TARGET_ARM)
2783 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx, pic_rtx));
2784 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx, l1));
2786 else
2788 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx, pic_rtx));
2789 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx, l1));
2792 seq = get_insns ();
2793 end_sequence ();
2794 if (prologue)
2795 emit_insn_after (seq, get_insns ());
2796 else
2797 emit_insn (seq);
2799 /* Need to emit this whether or not we obey regdecls,
2800 since setjmp/longjmp can cause life info to screw up. */
2801 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
2802 #endif /* AOF_ASSEMBLER */
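/* Schematically (added for exposition), the ARM sequence built above is:

       ldr     rPIC, .Loff       @ _GLOBAL_OFFSET_TABLE_ - (.Lpic + 8)
   .Lpic:
       add     rPIC, pc, rPIC    @ pc reads as .Lpic + 8 here

   which leaves the GOT base in the PIC register.  */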
2805 /* Return nonzero if X is valid as an ARM state addressing register. */
2806 static int
2807 arm_address_register_rtx_p (rtx x, int strict_p)
2809 int regno;
2811 if (GET_CODE (x) != REG)
2812 return 0;
2814 regno = REGNO (x);
2816 if (strict_p)
2817 return ARM_REGNO_OK_FOR_BASE_P (regno);
2819 return (regno <= LAST_ARM_REGNUM
2820 || regno >= FIRST_PSEUDO_REGISTER
2821 || regno == FRAME_POINTER_REGNUM
2822 || regno == ARG_POINTER_REGNUM);
2825 /* Return nonzero if X is a valid ARM state address operand. */
2827 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
2828 int strict_p)
2830 if (arm_address_register_rtx_p (x, strict_p))
2831 return 1;
2833 else if (GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
2834 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
2836 else if ((GET_CODE (x) == POST_MODIFY || GET_CODE (x) == PRE_MODIFY)
2837 && GET_MODE_SIZE (mode) <= 4
2838 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2839 && GET_CODE (XEXP (x, 1)) == PLUS
2840 && XEXP (XEXP (x, 1), 0) == XEXP (x, 0))
2841 return arm_legitimate_index_p (mode, XEXP (XEXP (x, 1), 1), outer,
2842 strict_p);
2844 /* After reload, constants split into minipools will have addresses
2845 derived from a LABEL_REF. */
2846 else if (reload_completed
2847 && (GET_CODE (x) == LABEL_REF
2848 || (GET_CODE (x) == CONST
2849 && GET_CODE (XEXP (x, 0)) == PLUS
2850 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
2851 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
2852 return 1;
2854 else if (mode == TImode)
2855 return 0;
2857 else if (mode == DImode || (TARGET_SOFT_FLOAT && mode == DFmode))
2859 if (GET_CODE (x) == PLUS
2860 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2861 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2863 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2865 if (val == 4 || val == -4 || val == -8)
2866 return 1;
2870 else if (TARGET_HARD_FLOAT && TARGET_VFP && mode == DFmode)
2872 if (GET_CODE (x) == PLUS
2873 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
2874 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2876 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2878 /* ??? valid arm offsets are a subset of VFP offsets.
2879 For now only allow this subset. Proper fix is to add an
2880 additional memory constraint for arm address modes.
2881 Alternatively allow full vfp addressing and let
2882 output_move_double fix it up with a sub-optimal sequence. */
2883 if (val == 4 || val == -4 || val == -8)
2884 return 1;
2888 else if (GET_CODE (x) == PLUS)
2890 rtx xop0 = XEXP (x, 0);
2891 rtx xop1 = XEXP (x, 1);
2893 return ((arm_address_register_rtx_p (xop0, strict_p)
2894 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
2895 || (arm_address_register_rtx_p (xop1, strict_p)
2896 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
2899 #if 0
2900 /* Reload currently can't handle MINUS, so disable this for now */
2901 else if (GET_CODE (x) == MINUS)
2903 rtx xop0 = XEXP (x, 0);
2904 rtx xop1 = XEXP (x, 1);
2906 return (arm_address_register_rtx_p (xop0, strict_p)
2907 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
2909 #endif
2911 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
2912 && GET_CODE (x) == SYMBOL_REF
2913 && CONSTANT_POOL_ADDRESS_P (x)
2914 && ! (flag_pic
2915 && symbol_mentioned_p (get_pool_constant (x))))
2916 return 1;
2918 else if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_DEC)
2919 && (GET_MODE_SIZE (mode) <= 4)
2920 && arm_address_register_rtx_p (XEXP (x, 0), strict_p))
2921 return 1;
2923 return 0;
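/* Summary (added for exposition) of the forms accepted above, e.g. for
   SImode:

       ldr r0, [r1]              @ base register
       ldr r0, [r1], #4          @ post-increment
       ldr r0, [r1, #-4]         @ base + small constant
       ldr r0, [r1, r2, lsl #2]  @ base + scaled index (word arrays)
*/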
2926 /* Return nonzero if INDEX is valid for an address index operand in
2927 ARM state. */
2928 static int
2929 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
2930 int strict_p)
2932 HOST_WIDE_INT range;
2933 enum rtx_code code = GET_CODE (index);
2935 if (TARGET_HARD_FLOAT && TARGET_FPA && GET_MODE_CLASS (mode) == MODE_FLOAT)
2936 return (code == CONST_INT && INTVAL (index) < 1024
2937 && INTVAL (index) > -1024
2938 && (INTVAL (index) & 3) == 0);
2940 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
2941 && (GET_MODE_CLASS (mode) == MODE_FLOAT || mode == DImode))
2942 return (code == CONST_INT
2943 && INTVAL (index) < 255
2944 && INTVAL (index) > -255);
2946 if (arm_address_register_rtx_p (index, strict_p)
2947 && GET_MODE_SIZE (mode) <= 4)
2948 return 1;
2950 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
2951 return (code == CONST_INT
2952 && INTVAL (index) < 256
2953 && INTVAL (index) > -256);
2955 if (GET_MODE_SIZE (mode) <= 4
2956 && ! (arm_arch4
2957 && (mode == HImode
2958 || (mode == QImode && outer == SIGN_EXTEND))))
2960 if (code == MULT)
2962 rtx xiop0 = XEXP (index, 0);
2963 rtx xiop1 = XEXP (index, 1);
2965 return ((arm_address_register_rtx_p (xiop0, strict_p)
2966 && power_of_two_operand (xiop1, SImode))
2967 || (arm_address_register_rtx_p (xiop1, strict_p)
2968 && power_of_two_operand (xiop0, SImode)));
2970 else if (code == LSHIFTRT || code == ASHIFTRT
2971 || code == ASHIFT || code == ROTATERT)
2973 rtx op = XEXP (index, 1);
2975 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
2976 && GET_CODE (op) == CONST_INT
2977 && INTVAL (op) > 0
2978 && INTVAL (op) <= 31);
2982 /* For ARM v4 we may be doing a sign-extend operation during the
2983 load. */
2984 if (arm_arch4)
2986 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
2987 range = 256;
2988 else
2989 range = 4096;
2991 else
2992 range = (mode == HImode) ? 4095 : 4096;
2994 return (code == CONST_INT
2995 && INTVAL (index) < range
2996 && INTVAL (index) > -range);
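/* So, for example (added for exposition): on ARMv4 an ldrh only reaches
   [rN, #-255..255], while a word ldr reaches [rN, #-4095..4095]; an
   index of 0x1000 fails here and must first be folded into the base.  */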
2999 /* Return nonzero if X is valid as a Thumb state base register. */
3000 static int
3001 thumb_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3003 int regno;
3005 if (GET_CODE (x) != REG)
3006 return 0;
3008 regno = REGNO (x);
3010 if (strict_p)
3011 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3013 return (regno <= LAST_LO_REGNUM
3014 || regno > LAST_VIRTUAL_REGISTER
3015 || regno == FRAME_POINTER_REGNUM
3016 || (GET_MODE_SIZE (mode) >= 4
3017 && (regno == STACK_POINTER_REGNUM
3018 || regno >= FIRST_PSEUDO_REGISTER
3019 || x == hard_frame_pointer_rtx
3020 || x == arg_pointer_rtx)));
3023 /* Return nonzero if x is a legitimate index register. This is the case
3024 for any base register that can access a QImode object. */
3025 inline static int
3026 thumb_index_register_rtx_p (rtx x, int strict_p)
3028 return thumb_base_register_rtx_p (x, QImode, strict_p);
3031 /* Return nonzero if x is a legitimate Thumb-state address.
3033 The AP may be eliminated to either the SP or the FP, so we use the
3034 least common denominator, e.g. SImode, and offsets from 0 to 64.
3036 ??? Verify whether the above is the right approach.
3038 ??? Also, the FP may be eliminated to the SP, so perhaps that
3039 needs special handling also.
3041 ??? Look at how the mips16 port solves this problem. It probably uses
3042 better ways to solve some of these problems.
3044 Although it is not incorrect, we don't accept QImode and HImode
3045 addresses based on the frame pointer or arg pointer until the
3046 reload pass starts. This is so that eliminating such addresses
3047 into stack based ones won't produce impossible code. */
3049 thumb_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3051 /* ??? Not clear if this is right. Experiment. */
3052 if (GET_MODE_SIZE (mode) < 4
3053 && !(reload_in_progress || reload_completed)
3054 && (reg_mentioned_p (frame_pointer_rtx, x)
3055 || reg_mentioned_p (arg_pointer_rtx, x)
3056 || reg_mentioned_p (virtual_incoming_args_rtx, x)
3057 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
3058 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
3059 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
3060 return 0;
3062 /* Accept any base register. SP only in SImode or larger. */
3063 else if (thumb_base_register_rtx_p (x, mode, strict_p))
3064 return 1;
3066 /* This is PC relative data before arm_reorg runs. */
3067 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
3068 && GET_CODE (x) == SYMBOL_REF
3069 && CONSTANT_POOL_ADDRESS_P (x) && ! flag_pic)
3070 return 1;
3072 /* This is PC relative data after arm_reorg runs. */
3073 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
3074 && (GET_CODE (x) == LABEL_REF
3075 || (GET_CODE (x) == CONST
3076 && GET_CODE (XEXP (x, 0)) == PLUS
3077 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3078 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3079 return 1;
3081 /* Post-inc indexing only supported for SImode and larger. */
3082 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
3083 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p))
3084 return 1;
3086 else if (GET_CODE (x) == PLUS)
3088 /* REG+REG address can be any two index registers. */
3089 /* We disallow FRAME+REG addressing since we know that FRAME
3090 will be replaced with STACK, and SP relative addressing only
3091 permits SP+OFFSET. */
3092 if (GET_MODE_SIZE (mode) <= 4
3093 && XEXP (x, 0) != frame_pointer_rtx
3094 && XEXP (x, 1) != frame_pointer_rtx
3095 && thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3096 && thumb_index_register_rtx_p (XEXP (x, 1), strict_p))
3097 return 1;
3099 /* REG+const has a 5- to 7-bit offset for non-SP registers. */
3100 else if ((thumb_index_register_rtx_p (XEXP (x, 0), strict_p)
3101 || XEXP (x, 0) == arg_pointer_rtx)
3102 && GET_CODE (XEXP (x, 1)) == CONST_INT
3103 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
3104 return 1;
3106 /* REG+const has a 10-bit offset for SP, but only SImode and
3107 larger are supported. */
3108 /* ??? Should probably check for DI/DFmode overflow here
3109 just like GO_IF_LEGITIMATE_OFFSET does. */
3110 else if (GET_CODE (XEXP (x, 0)) == REG
3111 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
3112 && GET_MODE_SIZE (mode) >= 4
3113 && GET_CODE (XEXP (x, 1)) == CONST_INT
3114 && INTVAL (XEXP (x, 1)) >= 0
3115 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
3116 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3117 return 1;
3119 else if (GET_CODE (XEXP (x, 0)) == REG
3120 && REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
3121 && GET_MODE_SIZE (mode) >= 4
3122 && GET_CODE (XEXP (x, 1)) == CONST_INT
3123 && (INTVAL (XEXP (x, 1)) & 3) == 0)
3124 return 1;
3127 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3128 && GET_MODE_SIZE (mode) == 4
3129 && GET_CODE (x) == SYMBOL_REF
3130 && CONSTANT_POOL_ADDRESS_P (x)
3131 && !(flag_pic
3132 && symbol_mentioned_p (get_pool_constant (x))))
3133 return 1;
3135 return 0;
3138 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3139 instruction of mode MODE. */
3141 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
3143 switch (GET_MODE_SIZE (mode))
3145 case 1:
3146 return val >= 0 && val < 32;
3148 case 2:
3149 return val >= 0 && val < 64 && (val & 1) == 0;
3151 default:
3152 return (val >= 0
3153 && (val + GET_MODE_SIZE (mode)) <= 128
3154 && (val & 3) == 0);
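/* In other words (added for exposition): ldrb reaches [rN, #0..31],
   ldrh [rN, #0..62] in steps of 2, and ldr [rN, #0..124] in steps of 4,
   matching the 5-bit scaled immediate in the Thumb load/store
   encodings.  */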
3158 /* Try machine-dependent ways of modifying an illegitimate address
3159 to be legitimate. If we find one, return the new, valid address. */
3161 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3163 if (GET_CODE (x) == PLUS)
3165 rtx xop0 = XEXP (x, 0);
3166 rtx xop1 = XEXP (x, 1);
3168 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
3169 xop0 = force_reg (SImode, xop0);
3171 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
3172 xop1 = force_reg (SImode, xop1);
3174 if (ARM_BASE_REGISTER_RTX_P (xop0)
3175 && GET_CODE (xop1) == CONST_INT)
3177 HOST_WIDE_INT n, low_n;
3178 rtx base_reg, val;
3179 n = INTVAL (xop1);
3181 /* VFP addressing modes actually allow greater offsets, but for
3182 now we just stick with the lowest common denominator. */
3183 if (mode == DImode
3184 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
3186 low_n = n & 0x0f;
3187 n &= ~0x0f;
3188 if (low_n > 4)
3190 n += 16;
3191 low_n -= 16;
3194 else
3196 low_n = ((mode) == TImode ? 0
3197 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
3198 n -= low_n;
3201 base_reg = gen_reg_rtx (SImode);
3202 val = force_operand (gen_rtx_PLUS (SImode, xop0,
3203 GEN_INT (n)), NULL_RTX);
3204 emit_move_insn (base_reg, val);
3205 x = (low_n == 0 ? base_reg
3206 : gen_rtx_PLUS (SImode, base_reg, GEN_INT (low_n)));
3208 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3209 x = gen_rtx_PLUS (SImode, xop0, xop1);
3212 /* XXX We don't allow MINUS any more -- see comment in
3213 arm_legitimate_address_p (). */
3214 else if (GET_CODE (x) == MINUS)
3216 rtx xop0 = XEXP (x, 0);
3217 rtx xop1 = XEXP (x, 1);
3219 if (CONSTANT_P (xop0))
3220 xop0 = force_reg (SImode, xop0);
3222 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
3223 xop1 = force_reg (SImode, xop1);
3225 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
3226 x = gen_rtx_MINUS (SImode, xop0, xop1);
3229 if (flag_pic)
3231 /* We need to find and carefully transform any SYMBOL and LABEL
3232 references; so go back to the original address expression. */
3233 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3235 if (new_x != orig_x)
3236 x = new_x;
3239 return x;
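/* Example (added for exposition): an SImode access at base + 0x1234 is
   out of range, so the code above splits it as

       add     rT, rB, #0x1000
       ldr     rX, [rT, #0x234]

   keeping the low 12 bits as the addressing offset.  */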
3243 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3244 to be legitimate. If we find one, return the new, valid address. */
3246 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
3248 if (GET_CODE (x) == PLUS
3249 && GET_CODE (XEXP (x, 1)) == CONST_INT
3250 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
3251 || INTVAL (XEXP (x, 1)) < 0))
3253 rtx xop0 = XEXP (x, 0);
3254 rtx xop1 = XEXP (x, 1);
3255 HOST_WIDE_INT offset = INTVAL (xop1);
3257 /* Try to fold the offset into a biasing of the base register and
3258 then offset from that. Only do this when optimizing for space;
3259 when optimizing for speed the constant is instead kept in a
register so that it can be CSEd between references. */
3260 if (optimize_size && offset >= 0
3261 && offset < 256 + 31 * GET_MODE_SIZE (mode))
3263 HOST_WIDE_INT delta;
3265 if (offset >= 256)
3266 delta = offset - (256 - GET_MODE_SIZE (mode));
3267 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
3268 delta = 31 * GET_MODE_SIZE (mode);
3269 else
3270 delta = offset & (~31 * GET_MODE_SIZE (mode));
3272 xop0 = force_operand (plus_constant (xop0, offset - delta),
3273 NULL_RTX);
3274 x = plus_constant (xop0, delta);
3276 else if (offset < 0 && offset > -256)
3277 /* Small negative offsets are best done with a subtract before the
3278 dereference; forcing these into a register normally takes two
3279 instructions. */
3280 x = force_operand (x, NULL_RTX);
3281 else
3283 /* For the remaining cases, force the constant into a register. */
3284 xop1 = force_reg (SImode, xop1);
3285 x = gen_rtx_PLUS (SImode, xop0, xop1);
3288 else if (GET_CODE (x) == PLUS
3289 && s_register_operand (XEXP (x, 1), SImode)
3290 && !s_register_operand (XEXP (x, 0), SImode))
3292 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
3294 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
3297 if (flag_pic)
3299 /* We need to find and carefully transform any SYMBOL and LABEL
3300 references; so go back to the original address expression. */
3301 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
3303 if (new_x != orig_x)
3304 x = new_x;
3307 return x;
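/* Example (added for exposition): with -Os an SImode access at
   base + 300 is roughly rebased by 252 (which fits an 8-bit add
   immediate), leaving a remaining offset of 48, which fits the 5-bit
   scaled load offset.  */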
3312 #define REG_OR_SUBREG_REG(X) \
3313 (GET_CODE (X) == REG \
3314 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3316 #define REG_OR_SUBREG_RTX(X) \
3317 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3319 #ifndef COSTS_N_INSNS
3320 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3321 #endif
3322 static inline int
3323 thumb_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
3325 enum machine_mode mode = GET_MODE (x);
3327 switch (code)
3329 case ASHIFT:
3330 case ASHIFTRT:
3331 case LSHIFTRT:
3332 case ROTATERT:
3333 case PLUS:
3334 case MINUS:
3335 case COMPARE:
3336 case NEG:
3337 case NOT:
3338 return COSTS_N_INSNS (1);
3340 case MULT:
3341 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3343 int cycles = 0;
3344 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
3346 while (i)
3348 i >>= 2;
3349 cycles++;
3351 return COSTS_N_INSNS (2) + cycles;
3353 return COSTS_N_INSNS (1) + 16;
3355 case SET:
3356 return (COSTS_N_INSNS (1)
3357 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
3358 + (GET_CODE (SET_DEST (x)) == MEM)));
3360 case CONST_INT:
3361 if (outer == SET)
3363 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
3364 return 0;
3365 if (thumb_shiftable_const (INTVAL (x)))
3366 return COSTS_N_INSNS (2);
3367 return COSTS_N_INSNS (3);
3369 else if ((outer == PLUS || outer == COMPARE)
3370 && INTVAL (x) < 256 && INTVAL (x) > -256)
3371 return 0;
3372 else if (outer == AND
3373 && INTVAL (x) < 256 && INTVAL (x) >= -256)
3374 return COSTS_N_INSNS (1);
3375 else if (outer == ASHIFT || outer == ASHIFTRT
3376 || outer == LSHIFTRT)
3377 return 0;
3378 return COSTS_N_INSNS (2);
3380 case CONST:
3381 case CONST_DOUBLE:
3382 case LABEL_REF:
3383 case SYMBOL_REF:
3384 return COSTS_N_INSNS (3);
3386 case UDIV:
3387 case UMOD:
3388 case DIV:
3389 case MOD:
3390 return 100;
3392 case TRUNCATE:
3393 return 99;
3395 case AND:
3396 case XOR:
3397 case IOR:
3398 /* XXX guess. */
3399 return 8;
3401 case ADDRESSOF:
3402 case MEM:
3403 /* XXX another guess. */
3404 /* Memory costs quite a lot for the first word, but subsequent words
3405 load at the equivalent of a single insn each. */
3406 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3407 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
3408 ? 4 : 0));
3410 case IF_THEN_ELSE:
3411 /* XXX a guess. */
3412 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3413 return 14;
3414 return 2;
3416 case ZERO_EXTEND:
3417 /* XXX still guessing. */
3418 switch (GET_MODE (XEXP (x, 0)))
3420 case QImode:
3421 return (1 + (mode == DImode ? 4 : 0)
3422 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3424 case HImode:
3425 return (4 + (mode == DImode ? 4 : 0)
3426 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3428 case SImode:
3429 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3431 default:
3432 return 99;
3435 default:
3436 return 99;
3441 /* Worker routine for arm_rtx_costs. */
3442 static inline int
3443 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
3445 enum machine_mode mode = GET_MODE (x);
3446 enum rtx_code subcode;
3447 int extra_cost;
3449 switch (code)
3451 case MEM:
3452 /* Memory costs quite a lot for the first word, but subsequent words
3453 load at the equivalent of a single insn each. */
3454 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
3455 + (GET_CODE (x) == SYMBOL_REF
3456 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
3458 case DIV:
3459 case MOD:
3460 case UDIV:
3461 case UMOD:
3462 return optimize_size ? COSTS_N_INSNS (2) : 100;
3464 case ROTATE:
3465 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
3466 return 4;
3467 /* Fall through */
3468 case ROTATERT:
3469 if (mode != SImode)
3470 return 8;
3471 /* Fall through */
3472 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
3473 if (mode == DImode)
3474 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
3475 + ((GET_CODE (XEXP (x, 0)) == REG
3476 || (GET_CODE (XEXP (x, 0)) == SUBREG
3477 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3478 ? 0 : 8));
3479 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
3480 || (GET_CODE (XEXP (x, 0)) == SUBREG
3481 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
3482 ? 0 : 4)
3483 + ((GET_CODE (XEXP (x, 1)) == REG
3484 || (GET_CODE (XEXP (x, 1)) == SUBREG
3485 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
3486 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
3487 ? 0 : 4));
3489 case MINUS:
3490 if (mode == DImode)
3491 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
3492 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3493 || (GET_CODE (XEXP (x, 0)) == CONST_INT
3494 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
3495 ? 0 : 8));
3497 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3498 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3499 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3500 && arm_const_double_rtx (XEXP (x, 1))))
3501 ? 0 : 8)
3502 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
3503 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
3504 && arm_const_double_rtx (XEXP (x, 0))))
3505 ? 0 : 8));
3507 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
3508 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
3509 && REG_OR_SUBREG_REG (XEXP (x, 1))))
3510 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
3511 || subcode == ASHIFTRT || subcode == LSHIFTRT
3512 || subcode == ROTATE || subcode == ROTATERT
3513 || (subcode == MULT
3514 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
3515 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
3516 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
3517 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
3518 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
3519 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
3520 && REG_OR_SUBREG_REG (XEXP (x, 0))))
3521 return 1;
3522 /* Fall through */
3524 case PLUS:
3525 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3526 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3527 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3528 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
3529 && arm_const_double_rtx (XEXP (x, 1))))
3530 ? 0 : 8));
3532 /* Fall through */
3533 case AND: case XOR: case IOR:
3534 extra_cost = 0;
3536 /* Normally the frame registers will be spilt into reg+const during
3537 reload, so it is a bad idea to combine them with other instructions,
3538 since then they might not be moved outside of loops. As a compromise
3539 we allow integration with ops that have a constant as their second
3540 operand. */
3541 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
3542 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
3543 && GET_CODE (XEXP (x, 1)) != CONST_INT)
3544 || (REG_OR_SUBREG_REG (XEXP (x, 1))
3545 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
3546 extra_cost = 4;
3548 if (mode == DImode)
3549 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
3550 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3551 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3552 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3553 ? 0 : 8));
3555 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
3556 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
3557 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
3558 || (GET_CODE (XEXP (x, 1)) == CONST_INT
3559 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
3560 ? 0 : 4));
3562 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
3563 return (1 + extra_cost
3564 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
3565 || subcode == LSHIFTRT || subcode == ASHIFTRT
3566 || subcode == ROTATE || subcode == ROTATERT
3567 || (subcode == MULT
3568 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3569 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
3570 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
3571 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
3572 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
3573 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
3574 ? 0 : 4));
3576 return 8;
3578 case MULT:
3579 /* This should have been handled by the CPU specific routines. */
3580 abort ();
3582 case TRUNCATE:
3583 if (arm_arch3m && mode == SImode
3584 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
3585 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
3586 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
3587 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
3588 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
3589 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
3590 return 8;
3591 return 99;
3593 case NEG:
3594 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3595 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
3596 /* Fall through */
3597 case NOT:
3598 if (mode == DImode)
3599 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3601 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
3603 case IF_THEN_ELSE:
3604 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
3605 return 14;
3606 return 2;
3608 case COMPARE:
3609 return 1;
3611 case ABS:
3612 return 4 + (mode == DImode ? 4 : 0);
3614 case SIGN_EXTEND:
3615 if (GET_MODE (XEXP (x, 0)) == QImode)
3616 return (4 + (mode == DImode ? 4 : 0)
3617 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3618 /* Fall through */
3619 case ZERO_EXTEND:
3620 switch (GET_MODE (XEXP (x, 0)))
3622 case QImode:
3623 return (1 + (mode == DImode ? 4 : 0)
3624 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3626 case HImode:
3627 return (4 + (mode == DImode ? 4 : 0)
3628 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3630 case SImode:
3631 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
3633 case V8QImode:
3634 case V4HImode:
3635 case V2SImode:
3636 case V4QImode:
3637 case V2HImode:
3638 return 1;
3640 default:
3641 break;
3643 abort ();
3645 case CONST_INT:
3646 if (const_ok_for_arm (INTVAL (x)))
3647 return outer == SET ? 2 : -1;
3648 else if (outer == AND
3649 && const_ok_for_arm (~INTVAL (x)))
3650 return -1;
3651 else if ((outer == COMPARE
3652 || outer == PLUS || outer == MINUS)
3653 && const_ok_for_arm (-INTVAL (x)))
3654 return -1;
3655 else
3656 return 5;
3658 case CONST:
3659 case LABEL_REF:
3660 case SYMBOL_REF:
3661 return 6;
3663 case CONST_DOUBLE:
3664 if (arm_const_double_rtx (x))
3665 return outer == SET ? 2 : -1;
3666 else if ((outer == COMPARE || outer == PLUS)
3667 && neg_const_double_rtx_ok_for_fpa (x))
3668 return -1;
3669 return 7;
3671 default:
3672 return 99;
3676 /* RTX costs for cores with a slow MUL implementation. */
3678 static bool
3679 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3681 enum machine_mode mode = GET_MODE (x);
3683 if (TARGET_THUMB)
3685 *total = thumb_rtx_costs (x, code, outer_code);
3686 return true;
3689 switch (code)
3691 case MULT:
3692 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3693 || mode == DImode)
3695 *total = 30;
3696 return true;
3699 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3701 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3702 & (unsigned HOST_WIDE_INT) 0xffffffff);
3703 int cost, const_ok = const_ok_for_arm (i);
3704 int j, booth_unit_size;
3706 /* Tune as appropriate. */
3707 cost = const_ok ? 4 : 8;
3708 booth_unit_size = 2;
3709 for (j = 0; i && j < 32; j += booth_unit_size)
3711 i >>= booth_unit_size;
3712 cost += 2;
3715 *total = cost;
3716 return true;
3719 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3720 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3721 return true;
3723 default:
3724 *total = arm_rtx_costs_1 (x, code, outer_code);
3725 return true;
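/* Example (added for exposition): a multiply by 0x55 has four occupied
   2-bit Booth steps, so the loop above charges 4 + 4*2 = 12, whereas a
   multiply by a full 32-bit constant costs up to 8 + 16*2 = 40.  */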
3730 /* RTX cost for cores with a fast multiply unit (M variants). */
3732 static bool
3733 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
3735 enum machine_mode mode = GET_MODE (x);
3737 if (TARGET_THUMB)
3739 *total = thumb_rtx_costs (x, code, outer_code);
3740 return true;
3743 switch (code)
3745 case MULT:
3746 /* There is no point basing this on the tuning, since it is always the
3747 fast variant if it exists at all. */
3748 if (mode == DImode
3749 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3750 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3751 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3753 *total = 8;
3754 return true;
3758 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3759 || mode == DImode)
3761 *total = 30;
3762 return true;
3765 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3767 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3768 & (unsigned HOST_WIDE_INT) 0xffffffff);
3769 int cost, const_ok = const_ok_for_arm (i);
3770 int j, booth_unit_size;
3772 /* Tune as appropriate. */
3773 cost = const_ok ? 4 : 8;
3774 booth_unit_size = 8;
3775 for (j = 0; i && j < 32; j += booth_unit_size)
3777 i >>= booth_unit_size;
3778 cost += 2;
3781 *total = cost;
3782 return true;
3785 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3786 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3787 return true;
3789 default:
3790 *total = arm_rtx_costs_1 (x, code, outer_code);
3791 return true;
3796 /* RTX cost for XScale CPUs. */
3798 static bool
3799 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
3801 enum machine_mode mode = GET_MODE (x);
3803 if (TARGET_THUMB)
3805 *total = thumb_rtx_costs (x, code, outer_code);
3806 return true;
3809 switch (code)
3811 case MULT:
3812 /* There is no point basing this on the tuning, since it is always the
3813 fast variant if it exists at all. */
3814 if (mode == DImode
3815 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3816 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3817 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3819 *total = 8;
3820 return true;
3824 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3825 || mode == DImode)
3827 *total = 30;
3828 return true;
3831 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3833 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
3834 & (unsigned HOST_WIDE_INT) 0xffffffff);
3835 int cost, const_ok = const_ok_for_arm (i);
3836 unsigned HOST_WIDE_INT masked_const;
3838 /* The cost will be related to two insns.
3839 First a load of the constant (MOV or LDR), then a multiply. */
3840 cost = 2;
3841 if (! const_ok)
3842 cost += 1; /* LDR is probably more expensive because
3843 of longer result latency. */
3844 masked_const = i & 0xffff8000;
3845 if (masked_const != 0 && masked_const != 0xffff8000)
3847 masked_const = i & 0xf8000000;
3848 if (masked_const == 0 || masked_const == 0xf8000000)
3849 cost += 1;
3850 else
3851 cost += 2;
3853 *total = cost;
3854 return true;
3857 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
3858 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
3859 return true;
3861 default:
3862 *total = arm_rtx_costs_1 (x, code, outer_code);
3863 return true;
3868 /* RTX costs for 9e (and later) cores. */
3870 static bool
3871 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
3873 enum machine_mode mode = GET_MODE (x);
3874 int nonreg_cost;
3875 int cost;
3877 if (TARGET_THUMB)
3879 switch (code)
3881 case MULT:
3882 *total = COSTS_N_INSNS (3);
3883 return true;
3885 default:
3886 *total = thumb_rtx_costs (x, code, outer_code);
3887 return true;
3891 switch (code)
3893 case MULT:
3894 /* There is no point basing this on the tuning, since it is always the
3895 fast variant if it exists at all. */
3896 if (mode == DImode
3897 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
3898 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3899 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
3901 *total = 3;
3902 return true;
3906 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
3908 *total = 30;
3909 return true;
3911 if (mode == DImode)
3913 cost = 7;
3914 nonreg_cost = 8;
3916 else
3918 cost = 2;
3919 nonreg_cost = 4;
3923 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
3924 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
3925 return true;
3927 default:
3928 *total = arm_rtx_costs_1 (x, code, outer_code);
3929 return true;
3932 /* All address computations that can be done are free, but rtx cost returns
3933 the same for practically all of them. So we weight the different types
3934 of address here in the order (most pref first):
3935 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
3936 static inline int
3937 arm_arm_address_cost (rtx x)
3939 enum rtx_code c = GET_CODE (x);
3941 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
3942 return 0;
3943 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
3944 return 10;
3946 if (c == PLUS || c == MINUS)
3948 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
3949 return 2;
3951 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
3952 return 3;
3954 return 4;
3957 return 6;
3960 static inline int
3961 arm_thumb_address_cost (rtx x)
3963 enum rtx_code c = GET_CODE (x);
3965 if (c == REG)
3966 return 1;
3967 if (c == PLUS
3968 && GET_CODE (XEXP (x, 0)) == REG
3969 && GET_CODE (XEXP (x, 1)) == CONST_INT)
3970 return 1;
3972 return 2;
3975 static int
3976 arm_address_cost (rtx x)
3978 return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
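Taken together, the two helpers implement the preference order the comment above describes. A toy summary of the ARM-state costs returned above (illustrative only, not GCC code); Thumb state uses the simpler 1/1/2 scheme of arm_thumb_address_cost:

#include <stdio.h>

struct addr_cost { const char *form; int cost; };

int
main (void)
{
  /* Lower cost marks the form the compiler should prefer when it can
     choose between equivalent addressing modes.  */
  static const struct addr_cost table[] = {
    { "pre/post inc/dec  [r0], #4",         0 },
    { "const-int sum     [r0, #8]",         2 },
    { "shift/arith sum   [r0, r1, lsl #2]", 3 },
    { "reg + reg         [r0, r1]",         4 },
    { "plain register    [r0]",             6 },
    { "mem/label/symbol",                  10 },
  };
  unsigned int i;

  for (i = 0; i < sizeof table / sizeof table[0]; i++)
    printf ("%-36s -> %d\n", table[i].form, table[i].cost);
  return 0;
}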
3981 static int
3982 arm_use_dfa_pipeline_interface (void)
3984 return true;
3987 static int
3988 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
3990 rtx i_pat, d_pat;
3992 /* Some true dependencies can have a higher cost depending
3993 on precisely how certain input operands are used. */
3994 if (arm_tune_xscale
3995 && REG_NOTE_KIND (link) == 0
3996 && recog_memoized (insn) >= 0
3997 && recog_memoized (dep) >= 0)
3999 int shift_opnum = get_attr_shift (insn);
4000 enum attr_type attr_type = get_attr_type (dep);
4002 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4003 operand for INSN. If we have a shifted input operand and the
4004 instruction we depend on is another ALU instruction, then we may
4005 have to account for an additional stall. */
4006 if (shift_opnum != 0
4007 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
4009 rtx shifted_operand;
4010 int opno;
4012 /* Get the shifted operand. */
4013 extract_insn (insn);
4014 shifted_operand = recog_data.operand[shift_opnum];
4016 /* Iterate over all the operands in DEP. If we write an operand
4017 that overlaps with SHIFTED_OPERAND, then we have to increase the
4018 cost of this dependency. */
4019 extract_insn (dep);
4020 preprocess_constraints ();
4021 for (opno = 0; opno < recog_data.n_operands; opno++)
4023 /* We can ignore strict inputs. */
4024 if (recog_data.operand_type[opno] == OP_IN)
4025 continue;
4027 if (reg_overlap_mentioned_p (recog_data.operand[opno],
4028 shifted_operand))
4029 return 2;
4034 /* XXX This is not strictly true for the FPA. */
4035 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
4036 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
4037 return 0;
4039 /* Call insns don't incur a stall, even if they follow a load. */
4040 if (REG_NOTE_KIND (link) == 0
4041 && GET_CODE (insn) == CALL_INSN)
4042 return 1;
4044 if ((i_pat = single_set (insn)) != NULL
4045 && GET_CODE (SET_SRC (i_pat)) == MEM
4046 && (d_pat = single_set (dep)) != NULL
4047 && GET_CODE (SET_DEST (d_pat)) == MEM)
4049 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
4050 /* This is a load after a store; there is no conflict if the load reads
4051 from a cached area. Assume that loads from the stack, and from the
4052 constant pool are cached, and that others will miss. This is a
4053 hack. */
4055 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
4056 || reg_mentioned_p (stack_pointer_rtx, src_mem)
4057 || reg_mentioned_p (frame_pointer_rtx, src_mem)
4058 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
4059 return 1;
4062 return cost;
4065 static int fp_consts_inited = 0;
4067 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4068 static const char * const strings_fp[8] =
4070 "0", "1", "2", "3",
4071 "4", "5", "0.5", "10"
4074 static REAL_VALUE_TYPE values_fp[8];
4076 static void
4077 init_fp_table (void)
4079 int i;
4080 REAL_VALUE_TYPE r;
4082 if (TARGET_VFP)
4083 fp_consts_inited = 1;
4084 else
4085 fp_consts_inited = 8;
4087 for (i = 0; i < fp_consts_inited; i++)
4089 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
4090 values_fp[i] = r;
4094 /* Return TRUE if rtx X is a valid immediate FP constant. */
4096 arm_const_double_rtx (rtx x)
4098 REAL_VALUE_TYPE r;
4099 int i;
4101 if (!fp_consts_inited)
4102 init_fp_table ();
4104 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4105 if (REAL_VALUE_MINUS_ZERO (r))
4106 return 0;
4108 for (i = 0; i < fp_consts_inited; i++)
4109 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4110 return 1;
4112 return 0;
4115 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4117 neg_const_double_rtx_ok_for_fpa (rtx x)
4119 REAL_VALUE_TYPE r;
4120 int i;
4122 if (!fp_consts_inited)
4123 init_fp_table ();
4125 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4126 r = REAL_VALUE_NEGATE (r);
4127 if (REAL_VALUE_MINUS_ZERO (r))
4128 return 0;
4130 for (i = 0; i < 8; i++)
4131 if (REAL_VALUES_EQUAL (r, values_fp[i]))
4132 return 1;
4134 return 0;
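The two predicates above reduce to membership in the eight architected FPA constants, with only 0.0 surviving for VFP and -0.0 excluded. A minimal standalone sketch assuming plain doubles (not GCC code; a C == cannot see the sign of -0.0, so the REAL_VALUE_MINUS_ZERO check is omitted):

#include <stdio.h>

static const double fp_consts[8] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.5, 10.0 };

/* Valid FP immediate check: all eight constants for FPA, just 0.0
   for VFP.  Illustrative only.  */
static int
fp_const_ok (double d, int is_vfp)
{
  int n = is_vfp ? 1 : 8;
  int i;

  for (i = 0; i < n; i++)
    if (d == fp_consts[i])
      return 1;
  return 0;
}

int
main (void)
{
  /* FPA accepts 0.5, VFP does not, and 7.0 is valid for neither.  */
  printf ("%d %d %d\n", fp_const_ok (0.5, 0), fp_const_ok (0.5, 1),
          fp_const_ok (7.0, 0));
  return 0;
}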
4137 /* Predicates for `match_operand' and `match_operator'. */
4139 /* s_register_operand is the same as register_operand, but it doesn't accept
4140 (SUBREG (MEM)...).
4142 This function exists because at the time it was put in it led to better
4143 code. SUBREG(MEM) always needs a reload in the places where
4144 s_register_operand is used, and this seemed to lead to excessive
4145 reloading. */
4147 s_register_operand (rtx op, enum machine_mode mode)
4149 if (GET_MODE (op) != mode && mode != VOIDmode)
4150 return 0;
4152 if (GET_CODE (op) == SUBREG)
4153 op = SUBREG_REG (op);
4155 /* We don't consider registers whose class is NO_REGS
4156 to be a register operand. */
4157 /* XXX might have to check for lo regs only for thumb ??? */
4158 return (GET_CODE (op) == REG
4159 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4160 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4163 /* A hard register operand (even before reload). */
4165 arm_hard_register_operand (rtx op, enum machine_mode mode)
4167 if (GET_MODE (op) != mode && mode != VOIDmode)
4168 return 0;
4170 return (GET_CODE (op) == REG
4171 && REGNO (op) < FIRST_PSEUDO_REGISTER);
4174 /* An arm register operand. */
4176 arm_general_register_operand (rtx op, enum machine_mode mode)
4178 if (GET_MODE (op) != mode && mode != VOIDmode)
4179 return 0;
4181 if (GET_CODE (op) == SUBREG)
4182 op = SUBREG_REG (op);
4184 return (GET_CODE (op) == REG
4185 && (REGNO (op) <= LAST_ARM_REGNUM
4186 || REGNO (op) >= FIRST_PSEUDO_REGISTER));
4189 /* Only accept reg, subreg(reg), const_int. */
4191 reg_or_int_operand (rtx op, enum machine_mode mode)
4193 if (GET_CODE (op) == CONST_INT)
4194 return 1;
4196 if (GET_MODE (op) != mode && mode != VOIDmode)
4197 return 0;
4199 if (GET_CODE (op) == SUBREG)
4200 op = SUBREG_REG (op);
4202 /* We don't consider registers whose class is NO_REGS
4203 to be a register operand. */
4204 return (GET_CODE (op) == REG
4205 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4206 || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
4209 /* Return 1 if OP is an item in memory, given that we are in reload. */
4211 arm_reload_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4213 int regno = true_regnum (op);
4215 return (!CONSTANT_P (op)
4216 && (regno == -1
4217 || (GET_CODE (op) == REG
4218 && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
4221 /* Return TRUE for valid operands for the rhs of an ARM instruction. */
4223 arm_rhs_operand (rtx op, enum machine_mode mode)
4225 return (s_register_operand (op, mode)
4226 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
4229 /* Return TRUE for valid operands for the
4230 rhs of an ARM instruction, or a load. */
4232 arm_rhsm_operand (rtx op, enum machine_mode mode)
4234 return (s_register_operand (op, mode)
4235 || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
4236 || memory_operand (op, mode));
4239 /* Return TRUE for valid operands for the rhs of an ARM instruction, or a
4240 constant that is valid when negated. */
4242 arm_add_operand (rtx op, enum machine_mode mode)
4244 if (TARGET_THUMB)
4245 return thumb_cmp_operand (op, mode);
4247 return (s_register_operand (op, mode)
4248 || (GET_CODE (op) == CONST_INT
4249 && (const_ok_for_arm (INTVAL (op))
4250 || const_ok_for_arm (-INTVAL (op)))));
4253 /* Return TRUE for valid ARM constants (or when valid if negated). */
4255 arm_addimm_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4257 return (GET_CODE (op) == CONST_INT
4258 && (const_ok_for_arm (INTVAL (op))
4259 || const_ok_for_arm (-INTVAL (op))));
4263 arm_not_operand (rtx op, enum machine_mode mode)
4265 return (s_register_operand (op, mode)
4266 || (GET_CODE (op) == CONST_INT
4267 && (const_ok_for_arm (INTVAL (op))
4268 || const_ok_for_arm (~INTVAL (op)))));
4271 /* Return TRUE if the operand is a memory reference which contains an
4272 offsettable address. */
4274 offsettable_memory_operand (rtx op, enum machine_mode mode)
4276 if (mode == VOIDmode)
4277 mode = GET_MODE (op);
4279 return (mode == GET_MODE (op)
4280 && GET_CODE (op) == MEM
4281 && offsettable_address_p (reload_completed | reload_in_progress,
4282 mode, XEXP (op, 0)));
4285 /* Return TRUE if the operand is a memory reference which is, or can be
4286 made word aligned by adjusting the offset. */
4288 alignable_memory_operand (rtx op, enum machine_mode mode)
4290 rtx reg;
4292 if (mode == VOIDmode)
4293 mode = GET_MODE (op);
4295 if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
4296 return 0;
4298 op = XEXP (op, 0);
4300 return ((GET_CODE (reg = op) == REG
4301 || (GET_CODE (op) == SUBREG
4302 && GET_CODE (reg = SUBREG_REG (op)) == REG)
4303 || (GET_CODE (op) == PLUS
4304 && GET_CODE (XEXP (op, 1)) == CONST_INT
4305 && (GET_CODE (reg = XEXP (op, 0)) == REG
4306 || (GET_CODE (XEXP (op, 0)) == SUBREG
4307 && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
4308 && REGNO_POINTER_ALIGN (REGNO (reg)) >= 32);
4311 /* Similar to s_register_operand, but does not allow hard integer
4312 registers. */
4314 f_register_operand (rtx op, enum machine_mode mode)
4316 if (GET_MODE (op) != mode && mode != VOIDmode)
4317 return 0;
4319 if (GET_CODE (op) == SUBREG)
4320 op = SUBREG_REG (op);
4322 /* We don't consider registers whose class is NO_REGS
4323 to be a register operand. */
4324 return (GET_CODE (op) == REG
4325 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4326 || REGNO_REG_CLASS (REGNO (op)) == FPA_REGS));
4329 /* Return TRUE for valid operands for the rhs of a floating point insn.
4330 Allows regs or certain consts on FPA, just regs for everything else. */
4332 arm_float_rhs_operand (rtx op, enum machine_mode mode)
4334 if (s_register_operand (op, mode))
4335 return TRUE;
4337 if (GET_MODE (op) != mode && mode != VOIDmode)
4338 return FALSE;
4340 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4341 return arm_const_double_rtx (op);
4343 return FALSE;
4347 arm_float_add_operand (rtx op, enum machine_mode mode)
4349 if (s_register_operand (op, mode))
4350 return TRUE;
4352 if (GET_MODE (op) != mode && mode != VOIDmode)
4353 return FALSE;
4355 if (TARGET_FPA && GET_CODE (op) == CONST_DOUBLE)
4356 return (arm_const_double_rtx (op)
4357 || neg_const_double_rtx_ok_for_fpa (op));
4359 return FALSE;
4363 /* Return TRUE if OP is suitable for the rhs of a floating point comparison.
4364 Depends which fpu we are targeting. */
4367 arm_float_compare_operand (rtx op, enum machine_mode mode)
4369 if (TARGET_VFP)
4370 return vfp_compare_operand (op, mode);
4371 else
4372 return arm_float_rhs_operand (op, mode);
4376 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4378 cirrus_memory_offset (rtx op)
4380 /* Reject eliminable registers. */
4381 if (! (reload_in_progress || reload_completed)
4382 && ( reg_mentioned_p (frame_pointer_rtx, op)
4383 || reg_mentioned_p (arg_pointer_rtx, op)
4384 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4385 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4386 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4387 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4388 return 0;
4390 if (GET_CODE (op) == MEM)
4392 rtx ind;
4394 ind = XEXP (op, 0);
4396 /* Match: (mem (reg)). */
4397 if (GET_CODE (ind) == REG)
4398 return 1;
4400 /* Match:
4401 (mem (plus (reg)
4402 (const))). */
4403 if (GET_CODE (ind) == PLUS
4404 && GET_CODE (XEXP (ind, 0)) == REG
4405 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4406 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
4407 return 1;
4410 return 0;
4414 arm_extendqisi_mem_op (rtx op, enum machine_mode mode)
4416 if (!memory_operand (op, mode))
4417 return 0;
4419 return arm_legitimate_address_p (mode, XEXP (op, 0), SIGN_EXTEND, 0);
4422 /* Return nonzero if OP is a Cirrus or general register. */
4424 cirrus_register_operand (rtx op, enum machine_mode mode)
4426 if (GET_MODE (op) != mode && mode != VOIDmode)
4427 return FALSE;
4429 if (GET_CODE (op) == SUBREG)
4430 op = SUBREG_REG (op);
4432 return (GET_CODE (op) == REG
4433 && (REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS
4434 || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS));
4437 /* Return nonzero if OP is a Cirrus FP register. */
4439 cirrus_fp_register (rtx op, enum machine_mode mode)
4441 if (GET_MODE (op) != mode && mode != VOIDmode)
4442 return FALSE;
4444 if (GET_CODE (op) == SUBREG)
4445 op = SUBREG_REG (op);
4447 return (GET_CODE (op) == REG
4448 && (REGNO (op) >= FIRST_PSEUDO_REGISTER
4449 || REGNO_REG_CLASS (REGNO (op)) == CIRRUS_REGS));
4452 /* Return nonzero if OP is a 6-bit constant (0..63). */
4454 cirrus_shift_const (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4456 return (GET_CODE (op) == CONST_INT
4457 && INTVAL (op) >= 0
4458 && INTVAL (op) < 64);
4462 /* Return TRUE if OP is a valid VFP memory address pattern. */
4463 /* Copied from cirrus_memory_offset but with restricted offset range. */
4466 vfp_mem_operand (rtx op)
4468 /* Reject eliminable registers. */
4470 if (! (reload_in_progress || reload_completed)
4471 && ( reg_mentioned_p (frame_pointer_rtx, op)
4472 || reg_mentioned_p (arg_pointer_rtx, op)
4473 || reg_mentioned_p (virtual_incoming_args_rtx, op)
4474 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
4475 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
4476 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
4477 return FALSE;
4479 /* Constants are converted into offsets from labels. */
4480 if (GET_CODE (op) == MEM)
4482 rtx ind;
4484 ind = XEXP (op, 0);
4486 if (reload_completed
4487 && (GET_CODE (ind) == LABEL_REF
4488 || (GET_CODE (ind) == CONST
4489 && GET_CODE (XEXP (ind, 0)) == PLUS
4490 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
4491 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
4492 return TRUE;
4494 /* Match: (mem (reg)). */
4495 if (GET_CODE (ind) == REG)
4496 return arm_address_register_rtx_p (ind, 0);
4498 /* Match:
4499 (mem (plus (reg)
4500 (const))). */
4501 if (GET_CODE (ind) == PLUS
4502 && GET_CODE (XEXP (ind, 0)) == REG
4503 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
4504 && GET_CODE (XEXP (ind, 1)) == CONST_INT
4505 && INTVAL (XEXP (ind, 1)) > -1024
4506 && INTVAL (XEXP (ind, 1)) < 1024)
4507 return TRUE;
4510 return FALSE;
4514 /* Return TRUE if OP is a REG or constant zero. */
4516 vfp_compare_operand (rtx op, enum machine_mode mode)
4518 if (s_register_operand (op, mode))
4519 return TRUE;
4521 return (GET_CODE (op) == CONST_DOUBLE
4522 && arm_const_double_rtx (op));
4526 /* Return GENERAL_REGS if a scratch register required to reload x to/from
4527 VFP registers. Otherwise return NO_REGS. */
4529 enum reg_class
4530 vfp_secondary_reload_class (enum machine_mode mode, rtx x)
4532 if (vfp_mem_operand (x) || s_register_operand (x, mode))
4533 return NO_REGS;
4535 return GENERAL_REGS;
4539 /* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
4540 Used by the Cirrus Maverick code which has to work around
4541 a hardware bug triggered by such instructions. */
4542 static bool
4543 arm_memory_load_p (rtx insn)
4545 rtx body, lhs, rhs;
4547 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
4548 return false;
4550 body = PATTERN (insn);
4552 if (GET_CODE (body) != SET)
4553 return false;
4555 lhs = XEXP (body, 0);
4556 rhs = XEXP (body, 1);
4558 lhs = REG_OR_SUBREG_RTX (lhs);
4560 /* If the destination is not a general purpose
4561 register we do not have to worry. */
4562 if (GET_CODE (lhs) != REG
4563 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
4564 return false;
4566 /* As well as loads from memory we also have to react
4567 to loads of invalid constants which will be turned
4568 into loads from the minipool. */
4569 return (GET_CODE (rhs) == MEM
4570 || GET_CODE (rhs) == SYMBOL_REF
4571 || note_invalid_constants (insn, -1, false));
4574 /* Return TRUE if INSN is a Cirrus instruction. */
4575 static bool
4576 arm_cirrus_insn_p (rtx insn)
4578 enum attr_cirrus attr;
4580 /* get_attr aborts on USE and CLOBBER. */
4581 if (!insn
4582 || GET_CODE (insn) != INSN
4583 || GET_CODE (PATTERN (insn)) == USE
4584 || GET_CODE (PATTERN (insn)) == CLOBBER)
4585 return 0;
4587 attr = get_attr_cirrus (insn);
4589 return attr != CIRRUS_NOT;
4592 /* Cirrus reorg for invalid instruction combinations. */
4593 static void
4594 cirrus_reorg (rtx first)
4596 enum attr_cirrus attr;
4597 rtx body = PATTERN (first);
4598 rtx t;
4599 int nops;
4601 /* Any branch must be followed by 2 non-Cirrus instructions. */
4602 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
4604 nops = 0;
4605 t = next_nonnote_insn (first);
4607 if (arm_cirrus_insn_p (t))
4608 ++ nops;
4610 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4611 ++ nops;
4613 while (nops --)
4614 emit_insn_after (gen_nop (), first);
4616 return;
4619 /* (float (blah)) is in parallel with a clobber. */
4620 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
4621 body = XVECEXP (body, 0, 0);
4623 if (GET_CODE (body) == SET)
4625 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
4627 /* cfldrd, cfldr64, cfstrd, cfstr64 must
4628 be followed by a non-Cirrus insn. */
4629 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
4631 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
4632 emit_insn_after (gen_nop (), first);
4634 return;
4636 else if (arm_memory_load_p (first))
4638 unsigned int arm_regno;
4640 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
4641 ldr/cfmv64hr combination where the Rd field is the same
4642 in both instructions must be split with a non Cirrus
4643 insn. Example:
4645 ldr r0, blah
4647 cfmvsr mvf0, r0. */
4649 /* Get Arm register number for ldr insn. */
4650 if (GET_CODE (lhs) == REG)
4651 arm_regno = REGNO (lhs);
4652 else if (GET_CODE (rhs) == REG)
4653 arm_regno = REGNO (rhs);
4654 else
4655 abort ();
4657 /* Next insn. */
4658 first = next_nonnote_insn (first);
4660 if (! arm_cirrus_insn_p (first))
4661 return;
4663 body = PATTERN (first);
4665 /* (float (blah)) is in parallel with a clobber. */
4666 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
4667 body = XVECEXP (body, 0, 0);
4669 if (GET_CODE (body) == FLOAT)
4670 body = XEXP (body, 0);
4672 if (get_attr_cirrus (first) == CIRRUS_MOVE
4673 && GET_CODE (XEXP (body, 1)) == REG
4674 && arm_regno == REGNO (XEXP (body, 1)))
4675 emit_insn_after (gen_nop (), first);
4677 return;
4681 /* get_attr aborts on USE and CLOBBER. */
4682 if (!first
4683 || GET_CODE (first) != INSN
4684 || GET_CODE (PATTERN (first)) == USE
4685 || GET_CODE (PATTERN (first)) == CLOBBER)
4686 return;
4688 attr = get_attr_cirrus (first);
4690 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
4691 must be followed by a non-coprocessor instruction. */
4692 if (attr == CIRRUS_COMPARE)
4694 nops = 0;
4696 t = next_nonnote_insn (first);
4698 if (arm_cirrus_insn_p (t))
4699 ++ nops;
4701 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
4702 ++ nops;
4704 while (nops --)
4705 emit_insn_after (gen_nop (), first);
4707 return;
4711 /* Return nonzero if OP is a constant power of two. */
4713 power_of_two_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4715 if (GET_CODE (op) == CONST_INT)
4717 HOST_WIDE_INT value = INTVAL (op);
4719 return value != 0 && (value & (value - 1)) == 0;
4722 return FALSE;
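The test above is the classic single-bit trick: subtracting 1 clears the lowest set bit and sets every bit below it, so ANDing with the original value is zero exactly when one bit was set. A short standalone demonstration (not GCC code):

#include <stdio.h>

static int
is_power_of_two (long value)
{
  /* Clearing the lowest set bit leaves zero iff exactly one bit is set.  */
  return value != 0 && (value & (value - 1)) == 0;
}

int
main (void)
{
  printf ("%d %d %d\n", is_power_of_two (8),  /* 1 */
          is_power_of_two (12),               /* 0 */
          is_power_of_two (0));               /* 0 */
  return 0;
}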
4725 /* Return TRUE for a valid operand of a DImode operation.
4726 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4727 Note that this disallows MEM(REG+REG), but allows
4728 MEM(PRE/POST_INC/DEC(REG)). */
4730 di_operand (rtx op, enum machine_mode mode)
4732 if (s_register_operand (op, mode))
4733 return TRUE;
4735 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4736 return FALSE;
4738 if (GET_CODE (op) == SUBREG)
4739 op = SUBREG_REG (op);
4741 switch (GET_CODE (op))
4743 case CONST_DOUBLE:
4744 case CONST_INT:
4745 return TRUE;
4747 case MEM:
4748 return memory_address_p (DImode, XEXP (op, 0));
4750 default:
4751 return FALSE;
4755 /* Like di_operand, but don't accept constants. */
4757 nonimmediate_di_operand (rtx op, enum machine_mode mode)
4759 if (s_register_operand (op, mode))
4760 return TRUE;
4762 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && GET_MODE (op) != DImode)
4763 return FALSE;
4765 if (GET_CODE (op) == SUBREG)
4766 op = SUBREG_REG (op);
4768 if (GET_CODE (op) == MEM)
4769 return memory_address_p (DImode, XEXP (op, 0));
4771 return FALSE;
4774 /* Return TRUE for a valid operand of a DFmode operation when soft-float.
4775 Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
4776 Note that this disallows MEM(REG+REG), but allows
4777 MEM(PRE/POST_INC/DEC(REG)). */
4779 soft_df_operand (rtx op, enum machine_mode mode)
4781 if (s_register_operand (op, mode))
4782 return TRUE;
4784 if (mode != VOIDmode && GET_MODE (op) != mode)
4785 return FALSE;
4787 if (GET_CODE (op) == SUBREG && CONSTANT_P (SUBREG_REG (op)))
4788 return FALSE;
4790 if (GET_CODE (op) == SUBREG)
4791 op = SUBREG_REG (op);
4793 switch (GET_CODE (op))
4795 case CONST_DOUBLE:
4796 return TRUE;
4798 case MEM:
4799 return memory_address_p (DFmode, XEXP (op, 0));
4801 default:
4802 return FALSE;
4806 /* Like soft_df_operand, but don't accept constants. */
4808 nonimmediate_soft_df_operand (rtx op, enum machine_mode mode)
4810 if (s_register_operand (op, mode))
4811 return TRUE;
4813 if (mode != VOIDmode && GET_MODE (op) != mode)
4814 return FALSE;
4816 if (GET_CODE (op) == SUBREG)
4817 op = SUBREG_REG (op);
4819 if (GET_CODE (op) == MEM)
4820 return memory_address_p (DFmode, XEXP (op, 0));
4821 return FALSE;
4824 /* Return TRUE for valid index operands. */
4826 index_operand (rtx op, enum machine_mode mode)
4828 return (s_register_operand (op, mode)
4829 || (immediate_operand (op, mode)
4830 && (GET_CODE (op) != CONST_INT
4831 || (INTVAL (op) < 4096 && INTVAL (op) > -4096))));
4834 /* Return TRUE for valid shifts by a constant. This also accepts any
4835 power of two on the (somewhat overly relaxed) assumption that the
4836 shift operator in this case was a mult. */
4838 const_shift_operand (rtx op, enum machine_mode mode)
4840 return (power_of_two_operand (op, mode)
4841 || (immediate_operand (op, mode)
4842 && (GET_CODE (op) != CONST_INT
4843 || (INTVAL (op) < 32 && INTVAL (op) > 0))));
4846 /* Return TRUE for arithmetic operators which can be combined with a multiply
4847 (shift). */
4849 shiftable_operator (rtx x, enum machine_mode mode)
4851 enum rtx_code code;
4853 if (GET_MODE (x) != mode)
4854 return FALSE;
4856 code = GET_CODE (x);
4858 return (code == PLUS || code == MINUS
4859 || code == IOR || code == XOR || code == AND);
4862 /* Return TRUE for binary logical operators. */
4864 logical_binary_operator (rtx x, enum machine_mode mode)
4866 enum rtx_code code;
4868 if (GET_MODE (x) != mode)
4869 return FALSE;
4871 code = GET_CODE (x);
4873 return (code == IOR || code == XOR || code == AND);
4876 /* Return TRUE for shift operators. */
4878 shift_operator (rtx x, enum machine_mode mode)
4880 enum rtx_code code;
4882 if (GET_MODE (x) != mode)
4883 return FALSE;
4885 code = GET_CODE (x);
4887 if (code == MULT)
4888 return power_of_two_operand (XEXP (x, 1), mode);
4890 return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
4891 || code == ROTATERT);
4894 /* Return TRUE if x is EQ or NE. */
4896 equality_operator (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
4898 return GET_CODE (x) == EQ || GET_CODE (x) == NE;
4901 /* Return TRUE if x is a comparison operator other than LTGT or UNEQ. */
4903 arm_comparison_operator (rtx x, enum machine_mode mode)
4905 return (comparison_operator (x, mode)
4906 && GET_CODE (x) != LTGT
4907 && GET_CODE (x) != UNEQ);
4910 /* Return TRUE for SMIN SMAX UMIN UMAX operators. */
4912 minmax_operator (rtx x, enum machine_mode mode)
4914 enum rtx_code code = GET_CODE (x);
4916 if (GET_MODE (x) != mode)
4917 return FALSE;
4919 return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
4922 /* Return TRUE if this is the condition code register; if we aren't given
4923 a mode, accept any class CCmode register. */
4925 cc_register (rtx x, enum machine_mode mode)
4927 if (mode == VOIDmode)
4929 mode = GET_MODE (x);
4931 if (GET_MODE_CLASS (mode) != MODE_CC)
4932 return FALSE;
4935 if ( GET_MODE (x) == mode
4936 && GET_CODE (x) == REG
4937 && REGNO (x) == CC_REGNUM)
4938 return TRUE;
4940 return FALSE;
4943 /* Return TRUE if this is the condition code register; if we aren't given
4944 a mode, accept any class CCmode register which indicates a dominance
4945 expression. */
4947 dominant_cc_register (rtx x, enum machine_mode mode)
4949 if (mode == VOIDmode)
4951 mode = GET_MODE (x);
4953 if (GET_MODE_CLASS (mode) != MODE_CC)
4954 return FALSE;
4957 if (mode != CC_DNEmode && mode != CC_DEQmode
4958 && mode != CC_DLEmode && mode != CC_DLTmode
4959 && mode != CC_DGEmode && mode != CC_DGTmode
4960 && mode != CC_DLEUmode && mode != CC_DLTUmode
4961 && mode != CC_DGEUmode && mode != CC_DGTUmode)
4962 return FALSE;
4964 return cc_register (x, mode);
4967 /* Return TRUE if X references a SYMBOL_REF. */
4969 symbol_mentioned_p (rtx x)
4971 const char * fmt;
4972 int i;
4974 if (GET_CODE (x) == SYMBOL_REF)
4975 return 1;
4977 fmt = GET_RTX_FORMAT (GET_CODE (x));
4979 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4981 if (fmt[i] == 'E')
4983 int j;
4985 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4986 if (symbol_mentioned_p (XVECEXP (x, i, j)))
4987 return 1;
4989 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
4990 return 1;
4993 return 0;
4996 /* Return TRUE if X references a LABEL_REF. */
4998 label_mentioned_p (rtx x)
5000 const char * fmt;
5001 int i;
5003 if (GET_CODE (x) == LABEL_REF)
5004 return 1;
5006 fmt = GET_RTX_FORMAT (GET_CODE (x));
5007 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5009 if (fmt[i] == 'E')
5011 int j;
5013 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5014 if (label_mentioned_p (XVECEXP (x, i, j)))
5015 return 1;
5017 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
5018 return 1;
5021 return 0;
5024 enum rtx_code
5025 minmax_code (rtx x)
5027 enum rtx_code code = GET_CODE (x);
5029 if (code == SMAX)
5030 return GE;
5031 else if (code == SMIN)
5032 return LE;
5033 else if (code == UMIN)
5034 return LEU;
5035 else if (code == UMAX)
5036 return GEU;
5038 abort ();
5041 /* Return 1 if memory locations are adjacent. */
5043 adjacent_mem_locations (rtx a, rtx b)
5045 if ((GET_CODE (XEXP (a, 0)) == REG
5046 || (GET_CODE (XEXP (a, 0)) == PLUS
5047 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
5048 && (GET_CODE (XEXP (b, 0)) == REG
5049 || (GET_CODE (XEXP (b, 0)) == PLUS
5050 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
5052 int val0 = 0, val1 = 0;
5053 int reg0, reg1;
5055 if (GET_CODE (XEXP (a, 0)) == PLUS)
5057 reg0 = REGNO (XEXP (XEXP (a, 0), 0));
5058 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
5060 else
5061 reg0 = REGNO (XEXP (a, 0));
5063 if (GET_CODE (XEXP (b, 0)) == PLUS)
5065 reg1 = REGNO (XEXP (XEXP (b, 0), 0));
5066 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
5068 else
5069 reg1 = REGNO (XEXP (b, 0));
5071 /* Don't accept any offset that will require multiple
5072 instructions to handle, since this would cause the
5073 arith_adjacentmem pattern to output an overlong sequence. */
5074 if (!const_ok_for_op (PLUS, val0) || !const_ok_for_op (PLUS, val1))
5075 return 0;
5077 return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
5079 return 0;
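In other words, two reg+offset addresses are adjacent when they share a base register and their offsets differ by exactly one word, in either direction (with each offset encodable in a single add). A toy model of the final test (not GCC code):

#include <stdio.h>

/* Same-base, one-word-apart test, in either order.  Illustrative.  */
static int
adjacent (int reg0, long val0, int reg1, long val1)
{
  return reg0 == reg1 && (val1 - val0 == 4 || val0 - val1 == 4);
}

int
main (void)
{
  printf ("%d\n", adjacent (0, 8, 0, 12)); /* 1: [r0, #8] next to [r0, #12] */
  printf ("%d\n", adjacent (0, 8, 1, 12)); /* 0: different base registers */
  printf ("%d\n", adjacent (0, 8, 0, 16)); /* 0: two words apart */
  return 0;
}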
5082 /* Return 1 if OP is a load multiple operation. It is known to be
5083 parallel and the first section will be tested. */
5085 load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5087 HOST_WIDE_INT count = XVECLEN (op, 0);
5088 int dest_regno;
5089 rtx src_addr;
5090 HOST_WIDE_INT i = 1, base = 0;
5091 rtx elt;
5093 if (count <= 1
5094 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5095 return 0;
5097 /* Check to see if this might be a write-back. */
5098 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5100 i++;
5101 base = 1;
5103 /* Now check it more carefully. */
5104 if (GET_CODE (SET_DEST (elt)) != REG
5105 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5106 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5107 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5108 return 0;
5111 /* Perform a quick check so we don't blow up below. */
5112 if (count <= i
5113 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5114 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
5115 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
5116 return 0;
5118 dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
5119 src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
5121 for (; i < count; i++)
5123 elt = XVECEXP (op, 0, i);
5125 if (GET_CODE (elt) != SET
5126 || GET_CODE (SET_DEST (elt)) != REG
5127 || GET_MODE (SET_DEST (elt)) != SImode
5128 || REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
5129 || GET_CODE (SET_SRC (elt)) != MEM
5130 || GET_MODE (SET_SRC (elt)) != SImode
5131 || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
5132 || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
5133 || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
5134 || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
5135 return 0;
5138 return 1;
5141 /* Return 1 if OP is a store multiple operation. It is known to be
5142 parallel and the first section will be tested. */
5144 store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5146 HOST_WIDE_INT count = XVECLEN (op, 0);
5147 int src_regno;
5148 rtx dest_addr;
5149 HOST_WIDE_INT i = 1, base = 0;
5150 rtx elt;
5152 if (count <= 1
5153 || GET_CODE (XVECEXP (op, 0, 0)) != SET)
5154 return 0;
5156 /* Check to see if this might be a write-back. */
5157 if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
5159 i++;
5160 base = 1;
5162 /* Now check it more carefully. */
5163 if (GET_CODE (SET_DEST (elt)) != REG
5164 || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
5165 || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
5166 || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 1) * 4)
5167 return 0;
5170 /* Perform a quick check so we don't blow up below. */
5171 if (count <= i
5172 || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
5173 || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
5174 || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
5175 return 0;
5177 src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
5178 dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
5180 for (; i < count; i++)
5182 elt = XVECEXP (op, 0, i);
5184 if (GET_CODE (elt) != SET
5185 || GET_CODE (SET_SRC (elt)) != REG
5186 || GET_MODE (SET_SRC (elt)) != SImode
5187 || REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
5188 || GET_CODE (SET_DEST (elt)) != MEM
5189 || GET_MODE (SET_DEST (elt)) != SImode
5190 || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
5191 || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
5192 || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
5193 || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
5194 return 0;
5197 return 1;
5201 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5202 HOST_WIDE_INT *load_offset)
5204 int unsorted_regs[4];
5205 HOST_WIDE_INT unsorted_offsets[4];
5206 int order[4];
5207 int base_reg = -1;
5208 int i;
5210 /* Can only handle 2, 3, or 4 insns at present,
5211 though could be easily extended if required. */
5212 if (nops < 2 || nops > 4)
5213 abort ();
5215 /* Loop over the operands and check that the memory references are
5216 suitable (i.e. immediate offsets from the same base register). At
5217 the same time, extract the target register, and the memory
5218 offsets. */
5219 for (i = 0; i < nops; i++)
5221 rtx reg;
5222 rtx offset;
5224 /* Convert a subreg of a mem into the mem itself. */
5225 if (GET_CODE (operands[nops + i]) == SUBREG)
5226 operands[nops + i] = alter_subreg (operands + (nops + i));
5228 if (GET_CODE (operands[nops + i]) != MEM)
5229 abort ();
5231 /* Don't reorder volatile memory references; it doesn't seem worth
5232 looking for the case where the order is ok anyway. */
5233 if (MEM_VOLATILE_P (operands[nops + i]))
5234 return 0;
5236 offset = const0_rtx;
5238 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5239 || (GET_CODE (reg) == SUBREG
5240 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5241 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5242 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5243 == REG)
5244 || (GET_CODE (reg) == SUBREG
5245 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5246 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5247 == CONST_INT)))
5249 if (i == 0)
5251 base_reg = REGNO (reg);
5252 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5253 ? REGNO (operands[i])
5254 : REGNO (SUBREG_REG (operands[i])));
5255 order[0] = 0;
5257 else
5259 if (base_reg != (int) REGNO (reg))
5260 /* Not addressed from the same base register. */
5261 return 0;
5263 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5264 ? REGNO (operands[i])
5265 : REGNO (SUBREG_REG (operands[i])));
5266 if (unsorted_regs[i] < unsorted_regs[order[0]])
5267 order[0] = i;
5270 /* If it isn't an integer register, or if it overwrites the
5271 base register but isn't the last insn in the list, then
5272 we can't do this. */
5273 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
5274 || (i != nops - 1 && unsorted_regs[i] == base_reg))
5275 return 0;
5277 unsorted_offsets[i] = INTVAL (offset);
5279 else
5280 /* Not a suitable memory address. */
5281 return 0;
5284 /* All the useful information has now been extracted from the
5285 operands into unsorted_regs and unsorted_offsets; additionally,
5286 order[0] has been set to the lowest numbered register in the
5287 list. Sort the registers into order, and check that the memory
5288 offsets are ascending and adjacent. */
5290 for (i = 1; i < nops; i++)
5292 int j;
5294 order[i] = order[i - 1];
5295 for (j = 0; j < nops; j++)
5296 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5297 && (order[i] == order[i - 1]
5298 || unsorted_regs[j] < unsorted_regs[order[i]]))
5299 order[i] = j;
5301 /* Have we found a suitable register? If not, one must be used more
5302 than once. */
5303 if (order[i] == order[i - 1])
5304 return 0;
5306 /* Is the memory address adjacent and ascending? */
5307 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5308 return 0;
5311 if (base)
5313 *base = base_reg;
5315 for (i = 0; i < nops; i++)
5316 regs[i] = unsorted_regs[order[i]];
5318 *load_offset = unsorted_offsets[order[0]];
5321 if (unsorted_offsets[order[0]] == 0)
5322 return 1; /* ldmia */
5324 if (unsorted_offsets[order[0]] == 4)
5325 return 2; /* ldmib */
5327 if (unsorted_offsets[order[nops - 1]] == 0)
5328 return 3; /* ldmda */
5330 if (unsorted_offsets[order[nops - 1]] == -4)
5331 return 4; /* ldmdb */
5333 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5334 if the offset isn't small enough. The reason 2 ldrs are faster
5335 is because these ARMs are able to do more than one cache access
5336 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5337 whilst the ARM8 has a double bandwidth cache. This means that
5338 these cores can do both an instruction fetch and a data fetch in
5339 a single cycle, so the trick of calculating the address into a
5340 scratch register (one of the result regs) and then doing a load
5341 multiple actually becomes slower (and no smaller in code size).
5342 That is the transformation
5344 ldr rd1, [rbase + offset]
5345 ldr rd2, [rbase + offset + 4]
5349 add rd1, rbase, offset
5350 ldmia rd1, {rd1, rd2}
5352 produces worse code -- '3 cycles + any stalls on rd2' instead of
5353 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5354 access per cycle, the first sequence could never complete in less
5355 than 6 cycles, whereas the ldm sequence would only take 5 and
5356 would make better use of sequential accesses if not hitting the
5357 cache.
5359 We cheat here and test 'arm_ld_sched' which we currently know to
5360 only be true for the ARM8, ARM9 and StrongARM. If this ever
5361 changes, then the test below needs to be reworked. */
5362 if (nops == 2 && arm_ld_sched)
5363 return 0;
5365 /* Can't do it without setting up the offset, only do this if it takes
5366 no more than one insn. */
5367 return (const_ok_for_arm (unsorted_offsets[order[0]])
5368 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
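The return value encodes which ldm addressing variant fits the sorted, adjacent run of offsets; 5 means "materialize the base with one add/sub, then ldmia". A hedged sketch of just the final classification (hypothetical name classify_ldm, not GCC code; the add/sub fallback and the arm_ld_sched early-out are omitted):

#include <stdio.h>

/* 1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, 0 = no direct fit.  */
static int
classify_ldm (long first_offset, long last_offset)
{
  if (first_offset == 0)
    return 1;  /* Run starts at the base address.  */
  if (first_offset == 4)
    return 2;  /* Run starts one word above the base.  */
  if (last_offset == 0)
    return 3;  /* Run ends at the base address.  */
  if (last_offset == -4)
    return 4;  /* Run ends one word below the base.  */
  return 0;
}

int
main (void)
{
  /* Offsets {0,4,8,12} -> ldmia; {-12,-8,-4,0} -> ldmda.  */
  printf ("%d %d\n", classify_ldm (0, 12), classify_ldm (-12, 0));
  return 0;
}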
5371 const char *
5372 emit_ldm_seq (rtx *operands, int nops)
5374 int regs[4];
5375 int base_reg;
5376 HOST_WIDE_INT offset;
5377 char buf[100];
5378 int i;
5380 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5382 case 1:
5383 strcpy (buf, "ldm%?ia\t");
5384 break;
5386 case 2:
5387 strcpy (buf, "ldm%?ib\t");
5388 break;
5390 case 3:
5391 strcpy (buf, "ldm%?da\t");
5392 break;
5394 case 4:
5395 strcpy (buf, "ldm%?db\t");
5396 break;
5398 case 5:
5399 if (offset >= 0)
5400 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5401 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5402 (long) offset);
5403 else
5404 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
5405 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
5406 (long) -offset);
5407 output_asm_insn (buf, operands);
5408 base_reg = regs[0];
5409 strcpy (buf, "ldm%?ia\t");
5410 break;
5412 default:
5413 abort ();
5416 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5417 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5419 for (i = 1; i < nops; i++)
5420 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5421 reg_names[regs[i]]);
5423 strcat (buf, "}\t%@ phole ldm");
5425 output_asm_insn (buf, operands);
5426 return "";
5430 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
5431 HOST_WIDE_INT * load_offset)
5433 int unsorted_regs[4];
5434 HOST_WIDE_INT unsorted_offsets[4];
5435 int order[4];
5436 int base_reg = -1;
5437 int i;
5439 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5440 extended if required. */
5441 if (nops < 2 || nops > 4)
5442 abort ();
5444 /* Loop over the operands and check that the memory references are
5445 suitable (i.e. immediate offsets from the same base register). At
5446 the same time, extract the target register, and the memory
5447 offsets. */
5448 for (i = 0; i < nops; i++)
5450 rtx reg;
5451 rtx offset;
5453 /* Convert a subreg of a mem into the mem itself. */
5454 if (GET_CODE (operands[nops + i]) == SUBREG)
5455 operands[nops + i] = alter_subreg (operands + (nops + i));
5457 if (GET_CODE (operands[nops + i]) != MEM)
5458 abort ();
5460 /* Don't reorder volatile memory references; it doesn't seem worth
5461 looking for the case where the order is ok anyway. */
5462 if (MEM_VOLATILE_P (operands[nops + i]))
5463 return 0;
5465 offset = const0_rtx;
5467 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
5468 || (GET_CODE (reg) == SUBREG
5469 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5470 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
5471 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
5472 == REG)
5473 || (GET_CODE (reg) == SUBREG
5474 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
5475 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
5476 == CONST_INT)))
5478 if (i == 0)
5480 base_reg = REGNO (reg);
5481 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
5482 ? REGNO (operands[i])
5483 : REGNO (SUBREG_REG (operands[i])));
5484 order[0] = 0;
5486 else
5488 if (base_reg != (int) REGNO (reg))
5489 /* Not addressed from the same base register. */
5490 return 0;
5492 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
5493 ? REGNO (operands[i])
5494 : REGNO (SUBREG_REG (operands[i])));
5495 if (unsorted_regs[i] < unsorted_regs[order[0]])
5496 order[0] = i;
5499 /* If it isn't an integer register, then we can't do this. */
5500 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
5501 return 0;
5503 unsorted_offsets[i] = INTVAL (offset);
5505 else
5506 /* Not a suitable memory address. */
5507 return 0;
5510 /* All the useful information has now been extracted from the
5511 operands into unsorted_regs and unsorted_offsets; additionally,
5512 order[0] has been set to the lowest numbered register in the
5513 list. Sort the registers into order, and check that the memory
5514 offsets are ascending and adjacent. */
5516 for (i = 1; i < nops; i++)
5518 int j;
5520 order[i] = order[i - 1];
5521 for (j = 0; j < nops; j++)
5522 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
5523 && (order[i] == order[i - 1]
5524 || unsorted_regs[j] < unsorted_regs[order[i]]))
5525 order[i] = j;
5527 /* Have we found a suitable register? If not, one must be used more
5528 than once. */
5529 if (order[i] == order[i - 1])
5530 return 0;
5532 /* Is the memory address adjacent and ascending? */
5533 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
5534 return 0;
5537 if (base)
5539 *base = base_reg;
5541 for (i = 0; i < nops; i++)
5542 regs[i] = unsorted_regs[order[i]];
5544 *load_offset = unsorted_offsets[order[0]];
5547 if (unsorted_offsets[order[0]] == 0)
5548 return 1; /* stmia */
5550 if (unsorted_offsets[order[0]] == 4)
5551 return 2; /* stmib */
5553 if (unsorted_offsets[order[nops - 1]] == 0)
5554 return 3; /* stmda */
5556 if (unsorted_offsets[order[nops - 1]] == -4)
5557 return 4; /* stmdb */
5559 return 0;
5562 const char *
5563 emit_stm_seq (rtx *operands, int nops)
5565 int regs[4];
5566 int base_reg;
5567 HOST_WIDE_INT offset;
5568 char buf[100];
5569 int i;
5571 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
5573 case 1:
5574 strcpy (buf, "stm%?ia\t");
5575 break;
5577 case 2:
5578 strcpy (buf, "stm%?ib\t");
5579 break;
5581 case 3:
5582 strcpy (buf, "stm%?da\t");
5583 break;
5585 case 4:
5586 strcpy (buf, "stm%?db\t");
5587 break;
5589 default:
5590 abort ();
5593 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
5594 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
5596 for (i = 1; i < nops; i++)
5597 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
5598 reg_names[regs[i]]);
5600 strcat (buf, "}\t%@ phole stm");
5602 output_asm_insn (buf, operands);
5603 return "";
5607 multi_register_push (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
5609 if (GET_CODE (op) != PARALLEL
5610 || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
5611 || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
5612 || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPEC_PUSH_MULT))
5613 return 0;
5615 return 1;
5618 /* Routines for use in generating RTL. */
5621 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
5622 int write_back, int unchanging_p, int in_struct_p,
5623 int scalar_p)
5625 int i = 0, j;
5626 rtx result;
5627 int sign = up ? 1 : -1;
5628 rtx mem;
5630 /* XScale has load-store double instructions, but they have stricter
5631 alignment requirements than load-store multiple, so we cannot
5632 use them.
5634 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5635 the pipeline until completion.
5637 NREGS CYCLES
5638 1 3
5639 2 4
5640 3 5
5641 4 6
5643 An ldr instruction takes 1-3 cycles, but does not block the
5644 pipeline.
5646 NREGS CYCLES
5647 1 1-3
5648 2 2-6
5649 3 3-9
5650 4 4-12
5652 Best case ldr will always win. However, the more ldr instructions
5653 we issue, the less likely we are to be able to schedule them well.
5654 Using ldr instructions also increases code size.
5656 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5657 for counts of 3 or 4 regs. */
5658 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5660 rtx seq;
5662 start_sequence ();
5664 for (i = 0; i < count; i++)
5666 mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
5667 RTX_UNCHANGING_P (mem) = unchanging_p;
5668 MEM_IN_STRUCT_P (mem) = in_struct_p;
5669 MEM_SCALAR_P (mem) = scalar_p;
5670 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
5673 if (write_back)
5674 emit_move_insn (from, plus_constant (from, count * 4 * sign));
5676 seq = get_insns ();
5677 end_sequence ();
5679 return seq;
5682 result = gen_rtx_PARALLEL (VOIDmode,
5683 rtvec_alloc (count + (write_back ? 1 : 0)));
5684 if (write_back)
5686 XVECEXP (result, 0, 0)
5687 = gen_rtx_SET (GET_MODE (from), from,
5688 plus_constant (from, count * 4 * sign));
5689 i = 1;
5690 count++;
5693 for (j = 0; i < count; i++, j++)
5695 mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4 * sign));
5696 RTX_UNCHANGING_P (mem) = unchanging_p;
5697 MEM_IN_STRUCT_P (mem) = in_struct_p;
5698 MEM_SCALAR_P (mem) = scalar_p;
5699 XVECEXP (result, 0, i)
5700 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
5703 return result;
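The cycle counts behind the XScale compromise in the comment above can be tabulated directly: ldm blocks for 2 + NREGS cycles, while each ldr takes 1-3 cycles without blocking the pipeline, so separate ldrs only pay off for one or two registers. A back-of-envelope sketch (not GCC code):

#include <stdio.h>

int
main (void)
{
  int n;

  /* ldm: 2 + n blocking cycles.  n ldrs: between n and 3n cycles.  */
  for (n = 1; n <= 4; n++)
    printf ("nregs=%d  ldm=%d  ldrs=%d..%d\n", n, 2 + n, n, 3 * n);
  return 0;
}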
5707 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
5708 int write_back, int unchanging_p, int in_struct_p,
5709 int scalar_p)
5711 int i = 0, j;
5712 rtx result;
5713 int sign = up ? 1 : -1;
5714 rtx mem;
5716 /* See arm_gen_load_multiple for discussion of
5717 the pros/cons of ldm/stm usage for XScale. */
5718 if (arm_tune_xscale && count <= 2 && ! optimize_size)
5720 rtx seq;
5722 start_sequence ();
5724 for (i = 0; i < count; i++)
5726 mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
5727 RTX_UNCHANGING_P (mem) = unchanging_p;
5728 MEM_IN_STRUCT_P (mem) = in_struct_p;
5729 MEM_SCALAR_P (mem) = scalar_p;
5730 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
5733 if (write_back)
5734 emit_move_insn (to, plus_constant (to, count * 4 * sign));
5736 seq = get_insns ();
5737 end_sequence ();
5739 return seq;
5742 result = gen_rtx_PARALLEL (VOIDmode,
5743 rtvec_alloc (count + (write_back ? 1 : 0)));
5744 if (write_back)
5746 XVECEXP (result, 0, 0)
5747 = gen_rtx_SET (GET_MODE (to), to,
5748 plus_constant (to, count * 4 * sign));
5749 i = 1;
5750 count++;
5753 for (j = 0; i < count; i++, j++)
5755 mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4 * sign));
5756 RTX_UNCHANGING_P (mem) = unchanging_p;
5757 MEM_IN_STRUCT_P (mem) = in_struct_p;
5758 MEM_SCALAR_P (mem) = scalar_p;
5760 XVECEXP (result, 0, i)
5761 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
5764 return result;
5768 arm_gen_movstrqi (rtx *operands)
5770 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
5771 int i;
5772 rtx src, dst;
5773 rtx st_src, st_dst, fin_src, fin_dst;
5774 rtx part_bytes_reg = NULL;
5775 rtx mem;
5776 int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
5777 int dst_scalar_p, src_scalar_p;
5779 if (GET_CODE (operands[2]) != CONST_INT
5780 || GET_CODE (operands[3]) != CONST_INT
5781 || INTVAL (operands[2]) > 64
5782 || INTVAL (operands[3]) & 3)
5783 return 0;
5785 st_dst = XEXP (operands[0], 0);
5786 st_src = XEXP (operands[1], 0);
5788 dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
5789 dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
5790 dst_scalar_p = MEM_SCALAR_P (operands[0]);
5791 src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
5792 src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
5793 src_scalar_p = MEM_SCALAR_P (operands[1]);
5795 fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
5796 fin_src = src = copy_to_mode_reg (SImode, st_src);
5798 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
5799 out_words_to_go = INTVAL (operands[2]) / 4;
5800 last_bytes = INTVAL (operands[2]) & 3;
5802 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
5803 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
5805 for (i = 0; in_words_to_go >= 2; i+=4)
5807 if (in_words_to_go > 4)
5808 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
5809 src_unchanging_p,
5810 src_in_struct_p,
5811 src_scalar_p));
5812 else
5813 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
5814 FALSE, src_unchanging_p,
5815 src_in_struct_p, src_scalar_p));
5817 if (out_words_to_go)
5819 if (out_words_to_go > 4)
5820 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
5821 dst_unchanging_p,
5822 dst_in_struct_p,
5823 dst_scalar_p));
5824 else if (out_words_to_go != 1)
5825 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
5826 dst, TRUE,
5827 (last_bytes == 0
5828 ? FALSE : TRUE),
5829 dst_unchanging_p,
5830 dst_in_struct_p,
5831 dst_scalar_p));
5832 else
5834 mem = gen_rtx_MEM (SImode, dst);
5835 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5836 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5837 MEM_SCALAR_P (mem) = dst_scalar_p;
5838 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
5839 if (last_bytes != 0)
5840 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
5844 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
5845 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
5848 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5849 if (out_words_to_go)
5851 rtx sreg;
5853 mem = gen_rtx_MEM (SImode, src);
5854 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5855 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5856 MEM_SCALAR_P (mem) = src_scalar_p;
5857 emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
5858 emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
5860 mem = gen_rtx_MEM (SImode, dst);
5861 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5862 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5863 MEM_SCALAR_P (mem) = dst_scalar_p;
5864 emit_move_insn (mem, sreg);
5865 emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
5866 in_words_to_go--;
5868 if (in_words_to_go) /* Sanity check */
5869 abort ();
5872 if (in_words_to_go)
5874 if (in_words_to_go < 0)
5875 abort ();
5877 mem = gen_rtx_MEM (SImode, src);
5878 RTX_UNCHANGING_P (mem) = src_unchanging_p;
5879 MEM_IN_STRUCT_P (mem) = src_in_struct_p;
5880 MEM_SCALAR_P (mem) = src_scalar_p;
5881 part_bytes_reg = copy_to_mode_reg (SImode, mem);
5884 if (last_bytes && part_bytes_reg == NULL)
5885 abort ();
5887 if (BYTES_BIG_ENDIAN && last_bytes)
5889 rtx tmp = gen_reg_rtx (SImode);
5891 /* The bytes we want are in the top end of the word. */
5892 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
5893 GEN_INT (8 * (4 - last_bytes))));
5894 part_bytes_reg = tmp;
5896 while (last_bytes)
5898 mem = gen_rtx_MEM (QImode, plus_constant (dst, last_bytes - 1));
5899 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5900 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5901 MEM_SCALAR_P (mem) = dst_scalar_p;
5902 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5904 if (--last_bytes)
5906 tmp = gen_reg_rtx (SImode);
5907 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
5908 part_bytes_reg = tmp;
5913 else
5915 if (last_bytes > 1)
5917 mem = gen_rtx_MEM (HImode, dst);
5918 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5919 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5920 MEM_SCALAR_P (mem) = dst_scalar_p;
5921 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
5922 last_bytes -= 2;
5923 if (last_bytes)
5925 rtx tmp = gen_reg_rtx (SImode);
5927 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
5928 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
5929 part_bytes_reg = tmp;
5933 if (last_bytes)
5935 mem = gen_rtx_MEM (QImode, dst);
5936 RTX_UNCHANGING_P (mem) = dst_unchanging_p;
5937 MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
5938 MEM_SCALAR_P (mem) = dst_scalar_p;
5939 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
5943 return 1;
5946 /* Generate a memory reference for a half word, such that it will be loaded
5947 into the top 16 bits of the word. We can assume that the address is
5948 known to be alignable and of the form reg, or plus (reg, const). */
5951 arm_gen_rotated_half_load (rtx memref)
5953 HOST_WIDE_INT offset = 0;
5954 rtx base = XEXP (memref, 0);
5956 if (GET_CODE (base) == PLUS)
5958 offset = INTVAL (XEXP (base, 1));
5959 base = XEXP (base, 0);
5962 /* If we aren't allowed to generate unaligned addresses, then fail. */
5963 if (TARGET_MMU_TRAPS
5964 && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
5965 return NULL;
5967 base = gen_rtx_MEM (SImode, plus_constant (base, offset & ~2));
5969 if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
5970 return base;
5972 return gen_rtx_ROTATE (SImode, base, GEN_INT (16));
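The effect of the ROTATE is easiest to see on a concrete word: the aligned 32-bit load picks up both half-words, and rotating by 16 swaps them so the wanted half-word lands in the top 16 bits. A standalone illustration assuming little-endian layout (not GCC code):

#include <stdio.h>
#include <stdint.h>

/* Fetch the aligned word, then rotate so the half-word at OFFSET ends
   up in the top 16 bits.  On little-endian, offset 2 is already on top.  */
static uint32_t
rotated_half_load (uint32_t word, int offset)
{
  if ((offset & 2) == 0)
    word = (word << 16) | (word >> 16);
  return word;
}

int
main (void)
{
  uint32_t mem = 0xBBBBAAAAu;  /* half-word 0xAAAA at offset 0 */

  printf ("%08x\n", (unsigned) rotated_half_load (mem, 0)); /* aaaabbbb */
  printf ("%08x\n", (unsigned) rotated_half_load (mem, 2)); /* bbbbaaaa */
  return 0;
}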
5975 /* Select a dominance comparison mode if possible for a test of the general
5976 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
5977 COND_OR == DOM_CC_X_AND_Y => (X && Y)
5978 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
5979 COND_OR == DOM_CC_X_OR_Y => (X || Y)
5980 In all cases OP will be either EQ or NE, but we don't need to know which
5981 here. If we are unable to support a dominance comparison we return
5982 CC mode. This will then fail to match for the RTL expressions that
5983 generate this call. */
5984 enum machine_mode
5985 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
5987 enum rtx_code cond1, cond2;
5988 int swapped = 0;
5990 /* Currently we will probably get the wrong result if the individual
5991 comparisons are not simple. This also ensures that it is safe to
5992 reverse a comparison if necessary. */
5993 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
5994 != CCmode)
5995 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
5996 != CCmode))
5997 return CCmode;
5999 /* The if_then_else variant of this tests the second condition if the
6000 first passes, but is true if the first fails. Reverse the first
6001 condition to get a true "inclusive-or" expression. */
6002 if (cond_or == DOM_CC_NX_OR_Y)
6003 cond1 = reverse_condition (cond1);
6005 /* If the comparisons are not equal, and one doesn't dominate the other,
6006 then we can't do this. */
6007 if (cond1 != cond2
6008 && !comparison_dominates_p (cond1, cond2)
6009 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6010 return CCmode;
6012 if (swapped)
6014 enum rtx_code temp = cond1;
6015 cond1 = cond2;
6016 cond2 = temp;
6019 switch (cond1)
6021 case EQ:
6022 if (cond2 == EQ || cond_or == DOM_CC_X_AND_Y)
6023 return CC_DEQmode;
6025 switch (cond2)
6027 case LE: return CC_DLEmode;
6028 case LEU: return CC_DLEUmode;
6029 case GE: return CC_DGEmode;
6030 case GEU: return CC_DGEUmode;
6031 default: break;
6034 break;
6036 case LT:
6037 if (cond2 == LT || cond_or == DOM_CC_X_AND_Y)
6038 return CC_DLTmode;
6039 if (cond2 == LE)
6040 return CC_DLEmode;
6041 if (cond2 == NE)
6042 return CC_DNEmode;
6043 break;
6045 case GT:
6046 if (cond2 == GT || cond_or == DOM_CC_X_AND_Y)
6047 return CC_DGTmode;
6048 if (cond2 == GE)
6049 return CC_DGEmode;
6050 if (cond2 == NE)
6051 return CC_DNEmode;
6052 break;
6054 case LTU:
6055 if (cond2 == LTU || cond_or == DOM_CC_X_AND_Y)
6056 return CC_DLTUmode;
6057 if (cond2 == LEU)
6058 return CC_DLEUmode;
6059 if (cond2 == NE)
6060 return CC_DNEmode;
6061 break;
6063 case GTU:
6064 if (cond2 == GTU || cond_or == DOM_CC_X_AND_Y)
6065 return CC_DGTUmode;
6066 if (cond2 == GEU)
6067 return CC_DGEUmode;
6068 if (cond2 == NE)
6069 return CC_DNEmode;
6070 break;
6072 /* The remaining cases only occur when both comparisons are the
6073 same. */
6074 case NE:
6075 return CC_DNEmode;
6077 case LE:
6078 return CC_DLEmode;
6080 case GE:
6081 return CC_DGEmode;
6083 case LEU:
6084 return CC_DLEUmode;
6086 case GEU:
6087 return CC_DGEUmode;
6089 default:
6090 break;
6093 abort ();
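"Dominance" here means that whenever the first comparison is true, the second is too, so one condition-code mode can serve both arms of the combined test. A brute-force check of a few pairs over a small range (illustrative only, not a proof and not GCC code):

#include <stdio.h>

static int gt (int a, int b) { return a > b;  }
static int ge (int a, int b) { return a >= b; }
static int ne (int a, int b) { return a != b; }

/* C1 dominates C2 if no pair of values makes C1 true but C2 false.  */
static int
dominates (int (*c1) (int, int), int (*c2) (int, int))
{
  int a, b;

  for (a = -2; a <= 2; a++)
    for (b = -2; b <= 2; b++)
      if (c1 (a, b) && !c2 (a, b))
        return 0;
  return 1;
}

int
main (void)
{
  printf ("GT dominates GE: %d\n", dominates (gt, ge)); /* 1 */
  printf ("GT dominates NE: %d\n", dominates (gt, ne)); /* 1 */
  printf ("GE dominates GT: %d\n", dominates (ge, gt)); /* 0 */
  return 0;
}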
6096 enum machine_mode
6097 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
6099 /* All floating point compares return CCFP if it is an equality
6100 comparison, and CCFPE otherwise. */
6101 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
6103 switch (op)
6105 case EQ:
6106 case NE:
6107 case UNORDERED:
6108 case ORDERED:
6109 case UNLT:
6110 case UNLE:
6111 case UNGT:
6112 case UNGE:
6113 case UNEQ:
6114 case LTGT:
6115 return CCFPmode;
6117 case LT:
6118 case LE:
6119 case GT:
6120 case GE:
6121 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
6122 return CCFPmode;
6123 return CCFPEmode;
6125 default:
6126 abort ();
6130 /* A compare with a shifted operand. Because of canonicalization, the
6131 comparison will have to be swapped when we emit the assembler. */
6132 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
6133 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6134 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
6135 || GET_CODE (x) == ROTATERT))
6136 return CC_SWPmode;
6138 /* This is a special case that is used by combine to allow a
6139 comparison of a shifted byte load to be split into a zero-extend
6140 followed by a comparison of the shifted integer (only valid for
6141 equalities and unsigned inequalities). */
6142 if (GET_MODE (x) == SImode
6143 && GET_CODE (x) == ASHIFT
6144 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
6145 && GET_CODE (XEXP (x, 0)) == SUBREG
6146 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
6147 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
6148 && (op == EQ || op == NE
6149 || op == GEU || op == GTU || op == LTU || op == LEU)
6150 && GET_CODE (y) == CONST_INT)
6151 return CC_Zmode;
6153 /* A construct for a conditional compare: if the false arm contains
6154 0, then both conditions must be true, otherwise either condition
6155 must be true. Not all conditions are possible, so CCmode is
6156 returned if it can't be done. */
6157 if (GET_CODE (x) == IF_THEN_ELSE
6158 && (XEXP (x, 2) == const0_rtx
6159 || XEXP (x, 2) == const1_rtx)
6160 && COMPARISON_P (XEXP (x, 0))
6161 && COMPARISON_P (XEXP (x, 1)))
6162 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6163 INTVAL (XEXP (x, 2)));
6165 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6166 if (GET_CODE (x) == AND
6167 && COMPARISON_P (XEXP (x, 0))
6168 && COMPARISON_P (XEXP (x, 1)))
6169 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6170 DOM_CC_X_AND_Y);
6172 if (GET_CODE (x) == IOR
6173 && COMPARISON_P (XEXP (x, 0))
6174 && COMPARISON_P (XEXP (x, 1)))
6175 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
6176 DOM_CC_X_OR_Y);
6178 /* An operation (on Thumb) where we want to test for a single bit.
6179 This is done by shifting that bit up into the top bit of a
6180 scratch register; we can then branch on the sign bit. */
6181 if (TARGET_THUMB
6182 && GET_MODE (x) == SImode
6183 && (op == EQ || op == NE)
6184 && (GET_CODE (x) == ZERO_EXTRACT))
6185 return CC_Nmode;
6187 /* For an operation that sets the condition codes as a side-effect, the
6188 V flag is not set correctly, so we can only use comparisons where
6189 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6190 instead.) */
6191 if (GET_MODE (x) == SImode
6192 && y == const0_rtx
6193 && (op == EQ || op == NE || op == LT || op == GE)
6194 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
6195 || GET_CODE (x) == AND || GET_CODE (x) == IOR
6196 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
6197 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
6198 || GET_CODE (x) == LSHIFTRT
6199 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
6200 || GET_CODE (x) == ROTATERT
6201 || (TARGET_ARM && GET_CODE (x) == ZERO_EXTRACT)))
6202 return CC_NOOVmode;
6204 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
6205 return CC_Zmode;
6207 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
6208 && GET_CODE (x) == PLUS
6209 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
6210 return CC_Cmode;
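/* A hypothetical instance: the test (ltu (plus r0 r1) r1) is true
   exactly when the unsigned addition r0 + r1 produced a carry, so only
   the C flag is needed and CC_Cmode suffices. */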
6212 return CCmode;
6215 /* X and Y are two things to compare using CODE. Emit the compare insn
6216 and return the rtx for the CC register in the proper mode. No special
6217 floating point handling is needed on the arm. */
6219 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
6221 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
6222 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
6224 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
6225 gen_rtx_COMPARE (mode, x, y)));
6227 return cc_reg;
6230 /* Generate a sequence of insns that will generate the correct return
6231 address mask depending on the physical architecture that the program
6232 is running on. */
6234 arm_gen_return_addr_mask (void)
6236 rtx reg = gen_reg_rtx (Pmode);
6238 emit_insn (gen_return_addr_mask (reg));
6239 return reg;
6242 void
6243 arm_reload_in_hi (rtx *operands)
6245 rtx ref = operands[1];
6246 rtx base, scratch;
6247 HOST_WIDE_INT offset = 0;
6249 if (GET_CODE (ref) == SUBREG)
6251 offset = SUBREG_BYTE (ref);
6252 ref = SUBREG_REG (ref);
6255 if (GET_CODE (ref) == REG)
6257 /* We have a pseudo which has been spilt onto the stack; there
6258 are two cases here: the first where there is a simple
6259 stack-slot replacement and a second where the stack-slot is
6260 out of range, or is used as a subreg. */
6261 if (reg_equiv_mem[REGNO (ref)])
6263 ref = reg_equiv_mem[REGNO (ref)];
6264 base = find_replacement (&XEXP (ref, 0));
6266 else
6267 /* The slot is out of range, or was dressed up in a SUBREG. */
6268 base = reg_equiv_address[REGNO (ref)];
6270 else
6271 base = find_replacement (&XEXP (ref, 0));
6273 /* Handle the case where the address is too complex to be offset by 1. */
6274 if (GET_CODE (base) == MINUS
6275 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6277 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6279 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6280 base = base_plus;
6282 else if (GET_CODE (base) == PLUS)
6284 /* The addend must be CONST_INT, or we would have dealt with it above. */
6285 HOST_WIDE_INT hi, lo;
6287 offset += INTVAL (XEXP (base, 1));
6288 base = XEXP (base, 0);
6290 /* Rework the address into a legal sequence of insns. */
6291 /* Valid range for lo is -4095 -> 4095 */
6292 lo = (offset >= 0
6293 ? (offset & 0xfff)
6294 : -((-offset) & 0xfff));
6296 /* Corner case: if lo is the max offset then we would be out of range
6297 once we have added the additional 1 below, so bump the msb into the
6298 pre-loading insn(s). */
6299 if (lo == 4095)
6300 lo &= 0x7ff;
6302 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6303 ^ (HOST_WIDE_INT) 0x80000000)
6304 - (HOST_WIDE_INT) 0x80000000);
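/* Worked example (values invented): offset 0x1234 splits into
   lo = 0x234 and hi = 0x1000; the addsi3 below folds 0x1000 into the
   base and the byte loads then use offsets 0x234 and 0x235. For the
   corner case offset == 4095, lo was masked above to 2047 so that
   lo + 1 stays representable, leaving hi == 2048. */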
6306 if (hi + lo != offset)
6307 abort ();
6309 if (hi != 0)
6311 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6313 /* Get the base address; addsi3 knows how to handle constants
6314 that require more than one insn. */
6315 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6316 base = base_plus;
6317 offset = lo;
6321 /* Operands[2] may overlap operands[0] (though it won't overlap
6322 operands[1]), that's why we asked for a DImode reg -- so we can
6323 use the bit that does not overlap. */
6324 if (REGNO (operands[2]) == REGNO (operands[0]))
6325 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6326 else
6327 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6329 emit_insn (gen_zero_extendqisi2 (scratch,
6330 gen_rtx_MEM (QImode,
6331 plus_constant (base,
6332 offset))));
6333 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
6334 gen_rtx_MEM (QImode,
6335 plus_constant (base,
6336 offset + 1))));
6337 if (!BYTES_BIG_ENDIAN)
6338 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6339 gen_rtx_IOR (SImode,
6340 gen_rtx_ASHIFT
6341 (SImode,
6342 gen_rtx_SUBREG (SImode, operands[0], 0),
6343 GEN_INT (8)),
6344 scratch)));
6345 else
6346 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
6347 gen_rtx_IOR (SImode,
6348 gen_rtx_ASHIFT (SImode, scratch,
6349 GEN_INT (8)),
6350 gen_rtx_SUBREG (SImode, operands[0],
6351 0))));
6354 /* Handle storing a half-word to memory during reload by synthesizing as two
6355 byte stores. Take care not to clobber the input values until after we
6356 have moved them somewhere safe. This code assumes that if the DImode
6357 scratch in operands[2] overlaps either the input value or output address
6358 in some way, then that value must die in this insn (we absolutely need
6359 two scratch registers for some corner cases). */
6360 void
6361 arm_reload_out_hi (rtx *operands)
6363 rtx ref = operands[0];
6364 rtx outval = operands[1];
6365 rtx base, scratch;
6366 HOST_WIDE_INT offset = 0;
6368 if (GET_CODE (ref) == SUBREG)
6370 offset = SUBREG_BYTE (ref);
6371 ref = SUBREG_REG (ref);
6374 if (GET_CODE (ref) == REG)
6376 /* We have a pseudo which has been spilt onto the stack; there
6377 are two cases here: the first where there is a simple
6378 stack-slot replacement and a second where the stack-slot is
6379 out of range, or is used as a subreg. */
6380 if (reg_equiv_mem[REGNO (ref)])
6382 ref = reg_equiv_mem[REGNO (ref)];
6383 base = find_replacement (&XEXP (ref, 0));
6385 else
6386 /* The slot is out of range, or was dressed up in a SUBREG. */
6387 base = reg_equiv_address[REGNO (ref)];
6389 else
6390 base = find_replacement (&XEXP (ref, 0));
6392 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
6394 /* Handle the case where the address is too complex to be offset by 1. */
6395 if (GET_CODE (base) == MINUS
6396 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
6398 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6400 /* Be careful not to destroy OUTVAL. */
6401 if (reg_overlap_mentioned_p (base_plus, outval))
6403 /* Updating base_plus might destroy outval, see if we can
6404 swap the scratch and base_plus. */
6405 if (!reg_overlap_mentioned_p (scratch, outval))
6407 rtx tmp = scratch;
6408 scratch = base_plus;
6409 base_plus = tmp;
6411 else
6413 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6415 /* Be conservative and copy OUTVAL into the scratch now,
6416 this should only be necessary if outval is a subreg
6417 of something larger than a word. */
6418 /* XXX Might this clobber base? I can't see how it can,
6419 since scratch is known to overlap with OUTVAL, and
6420 must be wider than a word. */
6421 emit_insn (gen_movhi (scratch_hi, outval));
6422 outval = scratch_hi;
6426 emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
6427 base = base_plus;
6429 else if (GET_CODE (base) == PLUS)
6431 /* The addend must be CONST_INT, or we would have dealt with it above. */
6432 HOST_WIDE_INT hi, lo;
6434 offset += INTVAL (XEXP (base, 1));
6435 base = XEXP (base, 0);
6437 /* Rework the address into a legal sequence of insns. */
6438 /* Valid range for lo is -4095 -> 4095 */
6439 lo = (offset >= 0
6440 ? (offset & 0xfff)
6441 : -((-offset) & 0xfff));
6443 /* Corner case: if lo is the max offset then we would be out of range
6444 once we have added the additional 1 below, so bump the msb into the
6445 pre-loading insn(s). */
6446 if (lo == 4095)
6447 lo &= 0x7ff;
6449 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
6450 ^ (HOST_WIDE_INT) 0x80000000)
6451 - (HOST_WIDE_INT) 0x80000000);
6453 if (hi + lo != offset)
6454 abort ();
6456 if (hi != 0)
6458 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
6460 /* Be careful not to destroy OUTVAL. */
6461 if (reg_overlap_mentioned_p (base_plus, outval))
6463 /* Updating base_plus might destroy outval, see if we
6464 can swap the scratch and base_plus. */
6465 if (!reg_overlap_mentioned_p (scratch, outval))
6467 rtx tmp = scratch;
6468 scratch = base_plus;
6469 base_plus = tmp;
6471 else
6473 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
6475 /* Be conservative and copy outval into scratch now,
6476 this should only be necessary if outval is a
6477 subreg of something larger than a word. */
6478 /* XXX Might this clobber base? I can't see how it
6479 can, since scratch is known to overlap with
6480 outval. */
6481 emit_insn (gen_movhi (scratch_hi, outval));
6482 outval = scratch_hi;
6486 /* Get the base address; addsi3 knows how to handle constants
6487 that require more than one insn. */
6488 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
6489 base = base_plus;
6490 offset = lo;
6494 if (BYTES_BIG_ENDIAN)
6496 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6497 plus_constant (base, offset + 1)),
6498 gen_lowpart (QImode, outval)));
6499 emit_insn (gen_lshrsi3 (scratch,
6500 gen_rtx_SUBREG (SImode, outval, 0),
6501 GEN_INT (8)));
6502 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6503 gen_lowpart (QImode, scratch)));
6505 else
6507 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
6508 gen_lowpart (QImode, outval)));
6509 emit_insn (gen_lshrsi3 (scratch,
6510 gen_rtx_SUBREG (SImode, outval, 0),
6511 GEN_INT (8)));
6512 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
6513 plus_constant (base, offset + 1)),
6514 gen_lowpart (QImode, scratch)));
6518 /* Print a symbolic form of X to the debug file, F. */
6519 static void
6520 arm_print_value (FILE *f, rtx x)
6522 switch (GET_CODE (x))
6524 case CONST_INT:
6525 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
6526 return;
6528 case CONST_DOUBLE:
6529 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
6530 return;
6532 case CONST_VECTOR:
6534 int i;
6536 fprintf (f, "<");
6537 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
6539 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
6540 if (i < (CONST_VECTOR_NUNITS (x) - 1))
6541 fputc (',', f);
6543 fprintf (f, ">");
6545 return;
6547 case CONST_STRING:
6548 fprintf (f, "\"%s\"", XSTR (x, 0));
6549 return;
6551 case SYMBOL_REF:
6552 fprintf (f, "`%s'", XSTR (x, 0));
6553 return;
6555 case LABEL_REF:
6556 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
6557 return;
6559 case CONST:
6560 arm_print_value (f, XEXP (x, 0));
6561 return;
6563 case PLUS:
6564 arm_print_value (f, XEXP (x, 0));
6565 fprintf (f, "+");
6566 arm_print_value (f, XEXP (x, 1));
6567 return;
6569 case PC:
6570 fprintf (f, "pc");
6571 return;
6573 default:
6574 fprintf (f, "????");
6575 return;
6579 /* Routines for manipulation of the constant pool. */
6581 /* Arm instructions cannot load a large constant directly into a
6582 register; they have to come from a pc relative load. The constant
6583 must therefore be placed in the addressable range of the pc
6584 relative load. Depending on the precise pc relative load
6585 instruction the range is somewhere between 256 bytes and 4k. This
6586 means that we often have to dump a constant inside a function, and
6587 generate code to branch around it.
6589 It is important to minimize this, since the branches will slow
6590 things down and make the code larger.
6592 Normally we can hide the table after an existing unconditional
6593 branch so that there is no interruption of the flow, but in the
6594 worst case the code looks like this:
6596 ldr rn, L1
6598 b L2
6599 align
6600 L1: .long value
6604 ldr rn, L3
6606 b L4
6607 align
6608 L3: .long value
6612 We fix this by performing a scan after scheduling, which notices
6613 which instructions need to have their operands fetched from the
6614 constant table and builds the table.
6616 The algorithm starts by building a table of all the constants that
6617 need fixing up and all the natural barriers in the function (places
6618 where a constant table can be dropped without breaking the flow).
6619 For each fixup we note how far the pc-relative replacement will be
6620 able to reach and the offset of the instruction into the function.
6622 Having built the table we then group the fixes together to form
6623 tables that are as large as possible (subject to addressing
6624 constraints) and emit each table of constants after the last
6625 barrier that is within range of all the instructions in the group.
6626 If a group does not contain a barrier, then we forcibly create one
6627 by inserting a jump instruction into the flow. Once the table has
6628 been inserted, the insns are then modified to reference the
6629 relevant entry in the pool.
6631 Possible enhancements to the algorithm (not implemented) are:
6633 1) For some processors and object formats, there may be benefit in
6634 aligning the pools to the start of cache lines; this alignment
6635 would need to be taken into account when calculating addressability
6636 of a pool. */
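/* A rough illustration (addresses invented): a pc-relative word load
   can reach on the order of 4k bytes, so a fix recorded at address
   0x100 requires its pool entry to appear before roughly address
   0x1100. A second fix at 0x900 in the same group tightens the
   constraint, and the shared pool is emitted after the last barrier
   that is within range of every fix in the group. */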
6638 /* These typedefs are located at the start of this file, so that
6639 they can be used in the prototypes there. This comment is to
6640 remind readers of that fact so that the following structures
6641 can be understood more easily.
6643 typedef struct minipool_node Mnode;
6644 typedef struct minipool_fixup Mfix; */
6646 struct minipool_node
6648 /* Doubly linked chain of entries. */
6649 Mnode * next;
6650 Mnode * prev;
6651 /* The maximum offset into the code that this entry can be placed. While
6652 pushing fixes for forward references, all entries are sorted in order
6653 of increasing max_address. */
6654 HOST_WIDE_INT max_address;
6655 /* Similarly for an entry inserted for a backwards ref. */
6656 HOST_WIDE_INT min_address;
6657 /* The number of fixes referencing this entry. This can become zero
6658 if we "unpush" an entry. In this case we ignore the entry when we
6659 come to emit the code. */
6660 int refcount;
6661 /* The offset from the start of the minipool. */
6662 HOST_WIDE_INT offset;
6663 /* The value in table. */
6664 rtx value;
6665 /* The mode of value. */
6666 enum machine_mode mode;
6667 /* The size of the value. With iWMMXt enabled
6668 sizes > 4 also imply an alignment of 8-bytes. */
6669 int fix_size;
6672 struct minipool_fixup
6674 Mfix * next;
6675 rtx insn;
6676 HOST_WIDE_INT address;
6677 rtx * loc;
6678 enum machine_mode mode;
6679 int fix_size;
6680 rtx value;
6681 Mnode * minipool;
6682 HOST_WIDE_INT forwards;
6683 HOST_WIDE_INT backwards;
6686 /* Fixes less than a word need padding out to a word boundary. */
6687 #define MINIPOOL_FIX_SIZE(mode) \
6688 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
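/* For example, MINIPOOL_FIX_SIZE (QImode) and MINIPOOL_FIX_SIZE (HImode)
   both evaluate to 4, while MINIPOOL_FIX_SIZE (DImode) evaluates to 8. */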
6690 static Mnode * minipool_vector_head;
6691 static Mnode * minipool_vector_tail;
6692 static rtx minipool_vector_label;
6694 /* The linked list of all minipool fixes required for this function. */
6695 Mfix * minipool_fix_head;
6696 Mfix * minipool_fix_tail;
6697 /* The fix entry for the current minipool, once it has been placed. */
6698 Mfix * minipool_barrier;
6700 /* Determines if INSN is the start of a jump table. Returns the end
6701 of the TABLE or NULL_RTX. */
6702 static rtx
6703 is_jump_table (rtx insn)
6705 rtx table;
6707 if (GET_CODE (insn) == JUMP_INSN
6708 && JUMP_LABEL (insn) != NULL
6709 && ((table = next_real_insn (JUMP_LABEL (insn)))
6710 == next_real_insn (insn))
6711 && table != NULL
6712 && GET_CODE (table) == JUMP_INSN
6713 && (GET_CODE (PATTERN (table)) == ADDR_VEC
6714 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
6715 return table;
6717 return NULL_RTX;
6720 #ifndef JUMP_TABLES_IN_TEXT_SECTION
6721 #define JUMP_TABLES_IN_TEXT_SECTION 0
6722 #endif
6724 static HOST_WIDE_INT
6725 get_jump_table_size (rtx insn)
6727 /* ADDR_VECs only take room if read-only data goes into the text
6728 section. */
6729 if (JUMP_TABLES_IN_TEXT_SECTION
6730 #if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
6731 || 1
6732 #endif
6735 rtx body = PATTERN (insn);
6736 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
6738 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
6741 return 0;
6744 /* Move a minipool fix MP from its current location to before MAX_MP.
6745 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
6746 constraints may need updating. */
6747 static Mnode *
6748 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
6749 HOST_WIDE_INT max_address)
6751 /* This should never be true and the code below assumes these are
6752 different. */
6753 if (mp == max_mp)
6754 abort ();
6756 if (max_mp == NULL)
6758 if (max_address < mp->max_address)
6759 mp->max_address = max_address;
6761 else
6763 if (max_address > max_mp->max_address - mp->fix_size)
6764 mp->max_address = max_mp->max_address - mp->fix_size;
6765 else
6766 mp->max_address = max_address;
6768 /* Unlink MP from its current position. Since max_mp is non-null,
6769 mp->prev must be non-null. */
6770 mp->prev->next = mp->next;
6771 if (mp->next != NULL)
6772 mp->next->prev = mp->prev;
6773 else
6774 minipool_vector_tail = mp->prev;
6776 /* Re-insert it before MAX_MP. */
6777 mp->next = max_mp;
6778 mp->prev = max_mp->prev;
6779 max_mp->prev = mp;
6781 if (mp->prev != NULL)
6782 mp->prev->next = mp;
6783 else
6784 minipool_vector_head = mp;
6787 /* Save the new entry. */
6788 max_mp = mp;
6790 /* Scan over the preceding entries and adjust their addresses as
6791 required. */
6792 while (mp->prev != NULL
6793 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6795 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6796 mp = mp->prev;
6799 return max_mp;
6802 /* Add a constant to the minipool for a forward reference. Returns the
6803 node added or NULL if the constant will not fit in this pool. */
6804 static Mnode *
6805 add_minipool_forward_ref (Mfix *fix)
6807 /* If set, max_mp is the first pool_entry that has a lower
6808 constraint than the one we are trying to add. */
6809 Mnode * max_mp = NULL;
6810 HOST_WIDE_INT max_address = fix->address + fix->forwards;
6811 Mnode * mp;
6813 /* If this fix's address is greater than the address of the first
6814 entry, then we can't put the fix in this pool. We subtract the
6815 size of the current fix to ensure that if the table is fully
6816 packed we still have enough room to insert this value by shuffling
6817 the other fixes forwards. */
6818 if (minipool_vector_head &&
6819 fix->address >= minipool_vector_head->max_address - fix->fix_size)
6820 return NULL;
6822 /* Scan the pool to see if a constant with the same value has
6823 already been added. While we are doing this, also note the
6824 location where we must insert the constant if it doesn't already
6825 exist. */
6826 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6828 if (GET_CODE (fix->value) == GET_CODE (mp->value)
6829 && fix->mode == mp->mode
6830 && (GET_CODE (fix->value) != CODE_LABEL
6831 || (CODE_LABEL_NUMBER (fix->value)
6832 == CODE_LABEL_NUMBER (mp->value)))
6833 && rtx_equal_p (fix->value, mp->value))
6835 /* More than one fix references this entry. */
6836 mp->refcount++;
6837 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
6840 /* Note the insertion point if necessary. */
6841 if (max_mp == NULL
6842 && mp->max_address > max_address)
6843 max_mp = mp;
6845 /* If we are inserting an 8-byte aligned quantity and
6846 we have not already found an insertion point, then
6847 make sure that all such 8-byte aligned quantities are
6848 placed at the start of the pool. */
6849 if (TARGET_REALLY_IWMMXT
6850 && max_mp == NULL
6851 && fix->fix_size == 8
6852 && mp->fix_size != 8)
6854 max_mp = mp;
6855 max_address = mp->max_address;
6859 /* The value is not currently in the minipool, so we need to create
6860 a new entry for it. If MAX_MP is NULL, the entry will be put on
6861 the end of the list since the placement is less constrained than
6862 any existing entry. Otherwise, we insert the new fix before
6863 MAX_MP and, if necessary, adjust the constraints on the other
6864 entries. */
6865 mp = xmalloc (sizeof (* mp));
6866 mp->fix_size = fix->fix_size;
6867 mp->mode = fix->mode;
6868 mp->value = fix->value;
6869 mp->refcount = 1;
6870 /* Not yet required for a backwards ref. */
6871 mp->min_address = -65536;
6873 if (max_mp == NULL)
6875 mp->max_address = max_address;
6876 mp->next = NULL;
6877 mp->prev = minipool_vector_tail;
6879 if (mp->prev == NULL)
6881 minipool_vector_head = mp;
6882 minipool_vector_label = gen_label_rtx ();
6884 else
6885 mp->prev->next = mp;
6887 minipool_vector_tail = mp;
6889 else
6891 if (max_address > max_mp->max_address - mp->fix_size)
6892 mp->max_address = max_mp->max_address - mp->fix_size;
6893 else
6894 mp->max_address = max_address;
6896 mp->next = max_mp;
6897 mp->prev = max_mp->prev;
6898 max_mp->prev = mp;
6899 if (mp->prev != NULL)
6900 mp->prev->next = mp;
6901 else
6902 minipool_vector_head = mp;
6905 /* Save the new entry. */
6906 max_mp = mp;
6908 /* Scan over the preceding entries and adjust their addresses as
6909 required. */
6910 while (mp->prev != NULL
6911 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
6913 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
6914 mp = mp->prev;
6917 return max_mp;
6920 static Mnode *
6921 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
6922 HOST_WIDE_INT min_address)
6924 HOST_WIDE_INT offset;
6926 /* This should never be true, and the code below assumes these are
6927 different. */
6928 if (mp == min_mp)
6929 abort ();
6931 if (min_mp == NULL)
6933 if (min_address > mp->min_address)
6934 mp->min_address = min_address;
6936 else
6938 /* We will adjust this below if it is too loose. */
6939 mp->min_address = min_address;
6941 /* Unlink MP from its current position. Since min_mp is non-null,
6942 mp->next must be non-null. */
6943 mp->next->prev = mp->prev;
6944 if (mp->prev != NULL)
6945 mp->prev->next = mp->next;
6946 else
6947 minipool_vector_head = mp->next;
6949 /* Reinsert it after MIN_MP. */
6950 mp->prev = min_mp;
6951 mp->next = min_mp->next;
6952 min_mp->next = mp;
6953 if (mp->next != NULL)
6954 mp->next->prev = mp;
6955 else
6956 minipool_vector_tail = mp;
6959 min_mp = mp;
6961 offset = 0;
6962 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
6964 mp->offset = offset;
6965 if (mp->refcount > 0)
6966 offset += mp->fix_size;
6968 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
6969 mp->next->min_address = mp->min_address + mp->fix_size;
6972 return min_mp;
6975 /* Add a constant to the minipool for a backward reference. Returns the
6976 node added or NULL if the constant will not fit in this pool.
6978 Note that the code for insertion for a backwards reference can be
6979 somewhat confusing because the calculated offsets for each fix do
6980 not take into account the size of the pool (which is still under
6981 construction). */
6982 static Mnode *
6983 add_minipool_backward_ref (Mfix *fix)
6985 /* If set, min_mp is the last pool_entry that has a lower constraint
6986 than the one we are trying to add. */
6987 Mnode *min_mp = NULL;
6988 /* This can be negative, since it is only a constraint. */
6989 HOST_WIDE_INT min_address = fix->address - fix->backwards;
6990 Mnode *mp;
6992 /* If we can't reach the current pool from this insn, or if we can't
6993 insert this entry at the end of the pool without pushing other
6994 fixes out of range, then we don't try. This ensures that we
6995 can't fail later on. */
6996 if (min_address >= minipool_barrier->address
6997 || (minipool_vector_tail->min_address + fix->fix_size
6998 >= minipool_barrier->address))
6999 return NULL;
7001 /* Scan the pool to see if a constant with the same value has
7002 already been added. While we are doing this, also note the
7003 location where we must insert the constant if it doesn't already
7004 exist. */
7005 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7007 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7008 && fix->mode == mp->mode
7009 && (GET_CODE (fix->value) != CODE_LABEL
7010 || (CODE_LABEL_NUMBER (fix->value)
7011 == CODE_LABEL_NUMBER (mp->value)))
7012 && rtx_equal_p (fix->value, mp->value)
7013 /* Check that there is enough slack to move this entry to the
7014 end of the table (this is conservative). */
7015 && (mp->max_address
7016 > (minipool_barrier->address
7017 + minipool_vector_tail->offset
7018 + minipool_vector_tail->fix_size)))
7020 mp->refcount++;
7021 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
7024 if (min_mp != NULL)
7025 mp->min_address += fix->fix_size;
7026 else
7028 /* Note the insertion point if necessary. */
7029 if (mp->min_address < min_address)
7031 /* For now, we do not allow the insertion of nodes that require 8-byte
7032 alignment anywhere but at the start of the pool. */
7033 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8 && mp->fix_size != 8)
7034 return NULL;
7035 else
7036 min_mp = mp;
7038 else if (mp->max_address
7039 < minipool_barrier->address + mp->offset + fix->fix_size)
7041 /* Inserting before this entry would push the fix beyond
7042 its maximum address (which can happen if we have
7043 re-located a forwards fix); force the new fix to come
7044 after it. */
7045 min_mp = mp;
7046 min_address = mp->min_address + fix->fix_size;
7048 /* If we are inserting an 8-byte aligned quantity and
7049 we have not already found an insertion point, then
7050 make sure that all such 8-byte aligned quantities are
7051 placed at the start of the pool. */
7052 else if (TARGET_REALLY_IWMMXT
7053 && min_mp == NULL
7054 && fix->fix_size == 8
7055 && mp->fix_size < 8)
7057 min_mp = mp;
7058 min_address = mp->min_address + fix->fix_size;
7063 /* We need to create a new entry. */
7064 mp = xmalloc (sizeof (* mp));
7065 mp->fix_size = fix->fix_size;
7066 mp->mode = fix->mode;
7067 mp->value = fix->value;
7068 mp->refcount = 1;
7069 mp->max_address = minipool_barrier->address + 65536;
7071 mp->min_address = min_address;
7073 if (min_mp == NULL)
7075 mp->prev = NULL;
7076 mp->next = minipool_vector_head;
7078 if (mp->next == NULL)
7080 minipool_vector_tail = mp;
7081 minipool_vector_label = gen_label_rtx ();
7083 else
7084 mp->next->prev = mp;
7086 minipool_vector_head = mp;
7088 else
7090 mp->next = min_mp->next;
7091 mp->prev = min_mp;
7092 min_mp->next = mp;
7094 if (mp->next != NULL)
7095 mp->next->prev = mp;
7096 else
7097 minipool_vector_tail = mp;
7100 /* Save the new entry. */
7101 min_mp = mp;
7103 if (mp->prev)
7104 mp = mp->prev;
7105 else
7106 mp->offset = 0;
7108 /* Scan over the following entries and adjust their offsets. */
7109 while (mp->next != NULL)
7111 if (mp->next->min_address < mp->min_address + mp->fix_size)
7112 mp->next->min_address = mp->min_address + mp->fix_size;
7114 if (mp->refcount)
7115 mp->next->offset = mp->offset + mp->fix_size;
7116 else
7117 mp->next->offset = mp->offset;
7119 mp = mp->next;
7122 return min_mp;
7125 static void
7126 assign_minipool_offsets (Mfix *barrier)
7128 HOST_WIDE_INT offset = 0;
7129 Mnode *mp;
7131 minipool_barrier = barrier;
7133 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7135 mp->offset = offset;
7137 if (mp->refcount > 0)
7138 offset += mp->fix_size;
7142 /* Output the literal table. */
7143 static void
7144 dump_minipool (rtx scan)
7146 Mnode * mp;
7147 Mnode * nmp;
7148 int align64 = 0;
7150 if (TARGET_REALLY_IWMMXT)
7151 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7152 if (mp->refcount > 0 && mp->fix_size == 8)
7154 align64 = 1;
7155 break;
7158 if (dump_file)
7159 fprintf (dump_file,
7160 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7161 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
7163 scan = emit_label_after (gen_label_rtx (), scan);
7164 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
7165 scan = emit_label_after (minipool_vector_label, scan);
7167 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
7169 if (mp->refcount > 0)
7171 if (dump_file)
7173 fprintf (dump_file,
7174 ";; Offset %u, min %ld, max %ld ",
7175 (unsigned) mp->offset, (unsigned long) mp->min_address,
7176 (unsigned long) mp->max_address);
7177 arm_print_value (dump_file, mp->value);
7178 fputc ('\n', dump_file);
7181 switch (mp->fix_size)
7183 #ifdef HAVE_consttable_1
7184 case 1:
7185 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
7186 break;
7188 #endif
7189 #ifdef HAVE_consttable_2
7190 case 2:
7191 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
7192 break;
7194 #endif
7195 #ifdef HAVE_consttable_4
7196 case 4:
7197 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
7198 break;
7200 #endif
7201 #ifdef HAVE_consttable_8
7202 case 8:
7203 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
7204 break;
7206 #endif
7207 default:
7208 abort ();
7209 break;
7213 nmp = mp->next;
7214 free (mp);
7217 minipool_vector_head = minipool_vector_tail = NULL;
7218 scan = emit_insn_after (gen_consttable_end (), scan);
7219 scan = emit_barrier_after (scan);
7222 /* Return the cost of forcibly inserting a barrier after INSN. */
7223 static int
7224 arm_barrier_cost (rtx insn)
7226 /* Basing the location of the pool on the loop depth is preferable,
7227 but at the moment, the basic block information seems to be
7228 corrupt by this stage of the compilation. */
7229 int base_cost = 50;
7230 rtx next = next_nonnote_insn (insn);
7232 if (next != NULL && GET_CODE (next) == CODE_LABEL)
7233 base_cost -= 20;
7235 switch (GET_CODE (insn))
7237 case CODE_LABEL:
7238 /* It will always be better to place the table before the label, rather
7239 than after it. */
7240 return 50;
7242 case INSN:
7243 case CALL_INSN:
7244 return base_cost;
7246 case JUMP_INSN:
7247 return base_cost - 10;
7249 default:
7250 return base_cost + 10;
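/* Example costs derived from the rules above: a JUMP_INSN whose next
   non-note insn is a CODE_LABEL yields 50 - 20 - 10 = 20, making the
   point just after an existing branch the cheapest place to force a
   barrier. */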
7254 /* Find the best place in the insn stream in the range
7255 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7256 Create the barrier by inserting a jump and add a new fix entry for
7257 it. */
7258 static Mfix *
7259 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
7261 HOST_WIDE_INT count = 0;
7262 rtx barrier;
7263 rtx from = fix->insn;
7264 rtx selected = from;
7265 int selected_cost;
7266 HOST_WIDE_INT selected_address;
7267 Mfix * new_fix;
7268 HOST_WIDE_INT max_count = max_address - fix->address;
7269 rtx label = gen_label_rtx ();
7271 selected_cost = arm_barrier_cost (from);
7272 selected_address = fix->address;
7274 while (from && count < max_count)
7276 rtx tmp;
7277 int new_cost;
7279 /* This code shouldn't have been called if there was a natural barrier
7280 within range. */
7281 if (GET_CODE (from) == BARRIER)
7282 abort ();
7284 /* Count the length of this insn. */
7285 count += get_attr_length (from);
7287 /* If there is a jump table, add its length. */
7288 tmp = is_jump_table (from);
7289 if (tmp != NULL)
7291 count += get_jump_table_size (tmp);
7293 /* Jump tables aren't in a basic block, so base the cost on
7294 the dispatch insn. If we select this location, we will
7295 still put the pool after the table. */
7296 new_cost = arm_barrier_cost (from);
7298 if (count < max_count && new_cost <= selected_cost)
7300 selected = tmp;
7301 selected_cost = new_cost;
7302 selected_address = fix->address + count;
7305 /* Continue after the dispatch table. */
7306 from = NEXT_INSN (tmp);
7307 continue;
7310 new_cost = arm_barrier_cost (from);
7312 if (count < max_count && new_cost <= selected_cost)
7314 selected = from;
7315 selected_cost = new_cost;
7316 selected_address = fix->address + count;
7319 from = NEXT_INSN (from);
7322 /* Create a new JUMP_INSN that branches around a barrier. */
7323 from = emit_jump_insn_after (gen_jump (label), selected);
7324 JUMP_LABEL (from) = label;
7325 barrier = emit_barrier_after (from);
7326 emit_label_after (label, barrier);
7328 /* Create a minipool barrier entry for the new barrier. */
7329 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
7330 new_fix->insn = barrier;
7331 new_fix->address = selected_address;
7332 new_fix->next = fix->next;
7333 fix->next = new_fix;
7335 return new_fix;
7338 /* Record that there is a natural barrier in the insn stream at
7339 ADDRESS. */
7340 static void
7341 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
7343 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7345 fix->insn = insn;
7346 fix->address = address;
7348 fix->next = NULL;
7349 if (minipool_fix_head != NULL)
7350 minipool_fix_tail->next = fix;
7351 else
7352 minipool_fix_head = fix;
7354 minipool_fix_tail = fix;
7357 /* Record INSN, which will need fixing up to load a value from the
7358 minipool. ADDRESS is the offset of the insn from the start of the
7359 function; LOC is a pointer to the part of the insn which requires
7360 fixing; VALUE is the constant that must be loaded, which is of type
7361 MODE. */
7362 static void
7363 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
7364 enum machine_mode mode, rtx value)
7366 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
7368 #ifdef AOF_ASSEMBLER
7369 /* PIC symbol references need to be converted into offsets into the
7370 based area. */
7371 /* XXX This shouldn't be done here. */
7372 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
7373 value = aof_pic_entry (value);
7374 #endif /* AOF_ASSEMBLER */
7376 fix->insn = insn;
7377 fix->address = address;
7378 fix->loc = loc;
7379 fix->mode = mode;
7380 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
7381 fix->value = value;
7382 fix->forwards = get_attr_pool_range (insn);
7383 fix->backwards = get_attr_neg_pool_range (insn);
7384 fix->minipool = NULL;
7386 /* If an insn doesn't have a range defined for it, then it isn't
7387 expecting to be reworked by this code. Better to abort now than
7388 to generate duff assembly code. */
7389 if (fix->forwards == 0 && fix->backwards == 0)
7390 abort ();
7392 /* With iWMMXt enabled, the pool is aligned to an 8-byte boundary.
7393 So there might be an empty word before the start of the pool.
7394 Hence we reduce the forward range by 4 to allow for this
7395 possibility. */
7396 if (TARGET_REALLY_IWMMXT && fix->fix_size == 8)
7397 fix->forwards -= 4;
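/* Purely illustrative numbers: an insn whose pool_range attribute is
   1024 is, for an 8-byte fix, treated as if it could only reach 1020
   bytes forwards, absorbing the possible padding word. */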
7399 if (dump_file)
7401 fprintf (dump_file,
7402 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
7403 GET_MODE_NAME (mode),
7404 INSN_UID (insn), (unsigned long) address,
7405 -1 * (long)fix->backwards, (long)fix->forwards);
7406 arm_print_value (dump_file, fix->value);
7407 fprintf (dump_file, "\n");
7410 /* Add it to the chain of fixes. */
7411 fix->next = NULL;
7413 if (minipool_fix_head != NULL)
7414 minipool_fix_tail->next = fix;
7415 else
7416 minipool_fix_head = fix;
7418 minipool_fix_tail = fix;
7421 /* Scan INSN and note any of its operands that need fixing.
7422 If DO_PUSHES is false we do not actually push any of the fixups
7423 needed. The function returns TRUE if any fixups were needed/pushed.
7424 This is used by arm_memory_load_p() which needs to know about loads
7425 of constants that will be converted into minipool loads. */
7426 static bool
7427 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
7429 bool result = false;
7430 int opno;
7432 extract_insn (insn);
7434 if (!constrain_operands (1))
7435 fatal_insn_not_found (insn);
7437 if (recog_data.n_alternatives == 0)
7438 return false;
7440 /* Fill in recog_op_alt with information about the constraints of this insn. */
7441 preprocess_constraints ();
7443 for (opno = 0; opno < recog_data.n_operands; opno++)
7445 /* Things we need to fix can only occur in inputs. */
7446 if (recog_data.operand_type[opno] != OP_IN)
7447 continue;
7449 /* If this alternative is a memory reference, then any mention
7450 of constants in this alternative is really to fool reload
7451 into allowing us to accept one there. We need to fix them up
7452 now so that we output the right code. */
7453 if (recog_op_alt[opno][which_alternative].memory_ok)
7455 rtx op = recog_data.operand[opno];
7457 if (CONSTANT_P (op))
7459 if (do_pushes)
7460 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
7461 recog_data.operand_mode[opno], op);
7462 result = true;
7464 else if (GET_CODE (op) == MEM
7465 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
7466 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
7468 if (do_pushes)
7470 rtx cop = avoid_constant_pool_reference (op);
7472 /* Casting the address of something to a mode narrower
7473 than a word can cause avoid_constant_pool_reference()
7474 to return the pool reference itself. That's no good to
7475 us here. Let's just hope that we can use the
7476 constant pool value directly. */
7477 if (op == cop)
7478 cop = get_pool_constant (XEXP (op, 0));
7480 push_minipool_fix (insn, address,
7481 recog_data.operand_loc[opno],
7482 recog_data.operand_mode[opno], cop);
7485 result = true;
7490 return result;
7493 /* Gcc puts the pool in the wrong place for ARM, since we can only
7494 load addresses a limited distance around the pc. We do some
7495 special munging to move the constant pool values to the correct
7496 point in the code. */
7497 static void
7498 arm_reorg (void)
7500 rtx insn;
7501 HOST_WIDE_INT address = 0;
7502 Mfix * fix;
7504 minipool_fix_head = minipool_fix_tail = NULL;
7506 /* The first insn must always be a note, or the code below won't
7507 scan it properly. */
7508 insn = get_insns ();
7509 if (GET_CODE (insn) != NOTE)
7510 abort ();
7512 /* Scan all the insns and record the operands that will need fixing. */
7513 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
7515 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7516 && (arm_cirrus_insn_p (insn)
7517 || GET_CODE (insn) == JUMP_INSN
7518 || arm_memory_load_p (insn)))
7519 cirrus_reorg (insn);
7521 if (GET_CODE (insn) == BARRIER)
7522 push_minipool_barrier (insn, address);
7523 else if (INSN_P (insn))
7525 rtx table;
7527 note_invalid_constants (insn, address, true);
7528 address += get_attr_length (insn);
7530 /* If the insn is a vector jump, add the size of the table
7531 and skip the table. */
7532 if ((table = is_jump_table (insn)) != NULL)
7534 address += get_jump_table_size (table);
7535 insn = table;
7540 fix = minipool_fix_head;
7542 /* Now scan the fixups and perform the required changes. */
7543 while (fix)
7545 Mfix * ftmp;
7546 Mfix * fdel;
7547 Mfix * last_added_fix;
7548 Mfix * last_barrier = NULL;
7549 Mfix * this_fix;
7551 /* Skip any further barriers before the next fix. */
7552 while (fix && GET_CODE (fix->insn) == BARRIER)
7553 fix = fix->next;
7555 /* No more fixes. */
7556 if (fix == NULL)
7557 break;
7559 last_added_fix = NULL;
7561 for (ftmp = fix; ftmp; ftmp = ftmp->next)
7563 if (GET_CODE (ftmp->insn) == BARRIER)
7565 if (ftmp->address >= minipool_vector_head->max_address)
7566 break;
7568 last_barrier = ftmp;
7570 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
7571 break;
7573 last_added_fix = ftmp; /* Keep track of the last fix added. */
7576 /* If we found a barrier, drop back to that; any fixes that we
7577 could have reached but come after the barrier will now go in
7578 the next mini-pool. */
7579 if (last_barrier != NULL)
7581 /* Reduce the refcount for those fixes that won't go into this
7582 pool after all. */
7583 for (fdel = last_barrier->next;
7584 fdel && fdel != ftmp;
7585 fdel = fdel->next)
7587 fdel->minipool->refcount--;
7588 fdel->minipool = NULL;
7591 ftmp = last_barrier;
7593 else
7595 /* ftmp is the first fix that we can't fit into this pool and
7596 there are no natural barriers that we could use. Insert a
7597 new barrier in the code somewhere between the previous
7598 fix and this one, and arrange to jump around it. */
7599 HOST_WIDE_INT max_address;
7601 /* The last item on the list of fixes must be a barrier, so
7602 we can never run off the end of the list of fixes without
7603 last_barrier being set. */
7604 if (ftmp == NULL)
7605 abort ();
7607 max_address = minipool_vector_head->max_address;
7608 /* Check that there isn't another fix that is in range that
7609 we couldn't fit into this pool because the pool was
7610 already too large: we need to put the pool before such an
7611 instruction. */
7612 if (ftmp->address < max_address)
7613 max_address = ftmp->address;
7615 last_barrier = create_fix_barrier (last_added_fix, max_address);
7618 assign_minipool_offsets (last_barrier);
7620 while (ftmp)
7622 if (GET_CODE (ftmp->insn) != BARRIER
7623 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
7624 == NULL))
7625 break;
7627 ftmp = ftmp->next;
7630 /* Scan over the fixes we have identified for this pool, fixing them
7631 up and adding the constants to the pool itself. */
7632 for (this_fix = fix; this_fix && ftmp != this_fix;
7633 this_fix = this_fix->next)
7634 if (GET_CODE (this_fix->insn) != BARRIER)
7636 rtx addr
7637 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
7638 minipool_vector_label),
7639 this_fix->minipool->offset);
7640 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
7643 dump_minipool (last_barrier->insn);
7644 fix = ftmp;
7647 /* From now on we must synthesize any constants that we can't handle
7648 directly. This can happen if the RTL gets split during final
7649 instruction generation. */
7650 after_arm_reorg = 1;
7652 /* Free the minipool memory. */
7653 obstack_free (&minipool_obstack, minipool_startobj);
7656 /* Routines to output assembly language. */
7658 /* If the rtx is one of the valid FPA constants then return the string of the number.
7659 In this way we can ensure that valid double constants are generated even
7660 when cross compiling. */
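/* The table of valid FPA immediates is built by init_fp_table (not
   shown here); it is believed to hold the eight constants 0.0, 1.0,
   2.0, 3.0, 4.0, 5.0, 0.5 and 10.0, which is why the loops below run
   over eight entries. */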
7661 const char *
7662 fp_immediate_constant (rtx x)
7664 REAL_VALUE_TYPE r;
7665 int i;
7667 if (!fp_consts_inited)
7668 init_fp_table ();
7670 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7671 for (i = 0; i < 8; i++)
7672 if (REAL_VALUES_EQUAL (r, values_fp[i]))
7673 return strings_fp[i];
7675 abort ();
7678 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7679 static const char *
7680 fp_const_from_val (REAL_VALUE_TYPE *r)
7682 int i;
7684 if (!fp_consts_inited)
7685 init_fp_table ();
7687 for (i = 0; i < 8; i++)
7688 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
7689 return strings_fp[i];
7691 abort ();
7694 /* Output the operands of a LDM/STM instruction to STREAM.
7695 MASK is the ARM register set mask of which only bits 0-15 are important.
7696 REG is the base register, either the frame pointer or the stack pointer,
7697 INSTR is the possibly suffixed load or store instruction. */
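/* A hypothetical call: with REG == SP_REGNUM and MASK ==
   (1 << 0) | (1 << 1) | (1 << LR_REGNUM), the loop below would emit
   something like "ldmfd sp!, {r0, r1, lr}", the exact mnemonic coming
   from the caller-supplied INSTR. */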
7698 static void
7699 print_multi_reg (FILE *stream, const char *instr, int reg, int mask)
7701 int i;
7702 int not_first = FALSE;
7704 fputc ('\t', stream);
7705 asm_fprintf (stream, instr, reg);
7706 fputs (", {", stream);
7708 for (i = 0; i <= LAST_ARM_REGNUM; i++)
7709 if (mask & (1 << i))
7711 if (not_first)
7712 fprintf (stream, ", ");
7714 asm_fprintf (stream, "%r", i);
7715 not_first = TRUE;
7718 fprintf (stream, "}");
7720 /* Add a ^ character for the 26-bit ABI, but only if we were loading
7721 the PC. Otherwise we would generate an UNPREDICTABLE instruction.
7722 Strictly speaking the instruction would be unpredictable only if
7723 we were writing back the base register as well, but since we never
7724 want to generate an LDM type 2 instruction (register bank switching)
7725 which is what you get if the PC is not being loaded, we do not need
7726 to check for writeback. */
7727 if (! TARGET_APCS_32
7728 && ((mask & (1 << PC_REGNUM)) != 0))
7729 fprintf (stream, "^");
7731 fprintf (stream, "\n");
7735 /* Output the operands of a FLDM/FSTM instruction to STREAM.
7736 REG is the base register,
7737 INSTR is the possibly suffixed load or store instruction.
7738 FMT specifies how to print the register name.
7739 START and COUNT specify the register range. */
7741 static void
7742 vfp_print_multi (FILE *stream, const char *instr, int reg,
7743 const char * fmt, int start, int count)
7745 int i;
7747 fputc ('\t', stream);
7748 asm_fprintf (stream, instr, reg);
7749 fputs (", {", stream);
7751 for (i = start; i < start + count; i++)
7753 if (i > start)
7754 fputs (", ", stream);
7755 asm_fprintf (stream, fmt, i);
7757 fputs ("}\n", stream);
7761 /* Output the assembly for a store multiple. */
7763 const char *
7764 vfp_output_fstmx (rtx * operands)
7766 char pattern[100];
7767 int p;
7768 int base;
7769 int i;
7771 strcpy (pattern, "fstmfdx\t%m0!, {%P1");
7772 p = strlen (pattern);
7774 if (GET_CODE (operands[1]) != REG)
7775 abort ();
7777 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
7778 for (i = 1; i < XVECLEN (operands[2], 0); i++)
7780 p += sprintf (&pattern[p], ", d%d", base + i);
7782 strcpy (&pattern[p], "}");
7784 output_asm_insn (pattern, operands);
7785 return "";
7789 /* Emit RTL to save block of VFP register pairs to the stack. */
7791 static rtx
7792 vfp_emit_fstmx (int base_reg, int count)
7794 rtx par;
7795 rtx dwarf;
7796 rtx tmp, reg;
7797 int i;
7799 /* ??? The frame layout is implementation defined. We describe
7800 standard format 1 (equivalent to a FSTMD insn and unused pad word).
7801 We really need some way of representing the whole block so that the
7802 unwinder can figure it out at runtime. */
7803 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
7804 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
7806 reg = gen_rtx_REG (DFmode, base_reg);
7807 base_reg += 2;
7809 XVECEXP (par, 0, 0)
7810 = gen_rtx_SET (VOIDmode,
7811 gen_rtx_MEM (BLKmode,
7812 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
7813 gen_rtx_UNSPEC (BLKmode,
7814 gen_rtvec (1, reg),
7815 UNSPEC_PUSH_MULT));
7817 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7818 gen_rtx_PLUS (SImode, stack_pointer_rtx,
7819 GEN_INT (-(count * 8 + 4))));
7820 RTX_FRAME_RELATED_P (tmp) = 1;
7821 XVECEXP (dwarf, 0, 0) = tmp;
7823 tmp = gen_rtx_SET (VOIDmode,
7824 gen_rtx_MEM (DFmode, stack_pointer_rtx),
7825 reg);
7826 RTX_FRAME_RELATED_P (tmp) = 1;
7827 XVECEXP (dwarf, 0, 1) = tmp;
7829 for (i = 1; i < count; i++)
7831 reg = gen_rtx_REG (DFmode, base_reg);
7832 base_reg += 2;
7833 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
7835 tmp = gen_rtx_SET (VOIDmode,
7836 gen_rtx_MEM (DFmode,
7837 gen_rtx_PLUS (SImode,
7838 stack_pointer_rtx,
7839 GEN_INT (i * 8))),
7840 reg);
7841 RTX_FRAME_RELATED_P (tmp) = 1;
7842 XVECEXP (dwarf, 0, i + 1) = tmp;
7845 par = emit_insn (par);
7846 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
7847 REG_NOTES (par));
7848 return par;
7852 /* Output a 'call' insn. */
7853 const char *
7854 output_call (rtx *operands)
7856 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
7858 if (REGNO (operands[0]) == LR_REGNUM)
7860 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
7861 output_asm_insn ("mov%?\t%0, %|lr", operands);
7864 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7866 if (TARGET_INTERWORK)
7867 output_asm_insn ("bx%?\t%0", operands);
7868 else
7869 output_asm_insn ("mov%?\t%|pc, %0", operands);
7871 return "";
7874 /* Output a 'call' insn whose target is a reference in memory. */
7875 const char *
7876 output_call_mem (rtx *operands)
7878 if (TARGET_INTERWORK)
7880 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7881 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7882 output_asm_insn ("bx%?\t%|ip", operands);
7884 else if (regno_use_in (LR_REGNUM, operands[0]))
7886 /* LR is used in the memory address. We load the address in the
7887 first instruction. It's safe to use IP as the target of the
7888 load since the call will kill it anyway. */
7889 output_asm_insn ("ldr%?\t%|ip, %0", operands);
7890 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7891 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
7893 else
7895 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
7896 output_asm_insn ("ldr%?\t%|pc, %0", operands);
7899 return "";
7903 /* Output a move from arm registers to an fpa register.
7904 OPERANDS[0] is an fpa register.
7905 OPERANDS[1] is the first of three arm registers. */
7906 const char *
7907 output_mov_long_double_fpa_from_arm (rtx *operands)
7909 int arm_reg0 = REGNO (operands[1]);
7910 rtx ops[3];
7912 if (arm_reg0 == IP_REGNUM)
7913 abort ();
7915 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7916 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7917 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7919 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
7920 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
7922 return "";
7925 /* Output a move from an fpa register to arm registers.
7926 OPERANDS[0] is the first of three arm registers.
7927 OPERANDS[1] is an fpa register. */
7928 const char *
7929 output_mov_long_double_arm_from_fpa (rtx *operands)
7931 int arm_reg0 = REGNO (operands[0]);
7932 rtx ops[3];
7934 if (arm_reg0 == IP_REGNUM)
7935 abort ();
7937 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7938 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7939 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
7941 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
7942 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
7943 return "";
7946 /* Output a move from arm registers to arm registers of a long double.
7947 OPERANDS[0] is the destination.
7948 OPERANDS[1] is the source. */
7949 const char *
7950 output_mov_long_double_arm_from_arm (rtx *operands)
7952 /* We have to be careful here because the two might overlap. */
7953 int dest_start = REGNO (operands[0]);
7954 int src_start = REGNO (operands[1]);
7955 rtx ops[2];
7956 int i;
7958 if (dest_start < src_start)
7960 for (i = 0; i < 3; i++)
7962 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7963 ops[1] = gen_rtx_REG (SImode, src_start + i);
7964 output_asm_insn ("mov%?\t%0, %1", ops);
7967 else
7969 for (i = 2; i >= 0; i--)
7971 ops[0] = gen_rtx_REG (SImode, dest_start + i);
7972 ops[1] = gen_rtx_REG (SImode, src_start + i);
7973 output_asm_insn ("mov%?\t%0, %1", ops);
7977 return "";
7981 /* Output a move from arm registers to an fpa register.
7982 OPERANDS[0] is an fpa register.
7983 OPERANDS[1] is the first register of an arm register pair. */
7984 const char *
7985 output_mov_double_fpa_from_arm (rtx *operands)
7987 int arm_reg0 = REGNO (operands[1]);
7988 rtx ops[2];
7990 if (arm_reg0 == IP_REGNUM)
7991 abort ();
7993 ops[0] = gen_rtx_REG (SImode, arm_reg0);
7994 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
7995 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
7996 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
7997 return "";
8000 /* Output a move from an fpa register to arm registers.
8001 OPERANDS[0] is the first register of an arm register pair.
8002 OPERANDS[1] is an fpa register. */
8003 const char *
8004 output_mov_double_arm_from_fpa (rtx *operands)
8006 int arm_reg0 = REGNO (operands[0]);
8007 rtx ops[2];
8009 if (arm_reg0 == IP_REGNUM)
8010 abort ();
8012 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8013 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8014 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
8015 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
8016 return "";
8019 /* Output a move between double words.
8020 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8021 or MEM<-REG and all MEMs must be offsettable addresses. */
8022 const char *
8023 output_move_double (rtx *operands)
8025 enum rtx_code code0 = GET_CODE (operands[0]);
8026 enum rtx_code code1 = GET_CODE (operands[1]);
8027 rtx otherops[3];
8029 if (code0 == REG)
8031 int reg0 = REGNO (operands[0]);
8033 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
8035 if (code1 == REG)
8037 int reg1 = REGNO (operands[1]);
8038 if (reg1 == IP_REGNUM)
8039 abort ();
8041 /* Ensure the second source is not overwritten. */
8042 if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
8043 output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
8044 else
8045 output_asm_insn ("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
8047 else if (code1 == CONST_VECTOR)
8049 HOST_WIDE_INT hint = 0;
8051 switch (GET_MODE (operands[1]))
8053 case V2SImode:
8054 otherops[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 1)));
8055 operands[1] = GEN_INT (INTVAL (CONST_VECTOR_ELT (operands[1], 0)));
8056 break;
8058 case V4HImode:
8059 if (BYTES_BIG_ENDIAN)
8061 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8062 hint <<= 16;
8063 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8065 else
8067 hint = INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8068 hint <<= 16;
8069 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8072 otherops[1] = GEN_INT (hint);
8073 hint = 0;
8075 if (BYTES_BIG_ENDIAN)
8077 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8078 hint <<= 16;
8079 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8081 else
8083 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8084 hint <<= 16;
8085 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8088 operands[1] = GEN_INT (hint);
8089 break;
8091 case V8QImode:
8092 if (BYTES_BIG_ENDIAN)
8094 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8095 hint <<= 8;
8096 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8097 hint <<= 8;
8098 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8099 hint <<= 8;
8100 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8102 else
8104 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 7));
8105 hint <<= 8;
8106 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 6));
8107 hint <<= 8;
8108 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 5));
8109 hint <<= 8;
8110 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 4));
8113 otherops[1] = GEN_INT (hint);
8114 hint = 0;
8116 if (BYTES_BIG_ENDIAN)
8118 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8119 hint <<= 8;
8120 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8121 hint <<= 8;
8122 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8123 hint <<= 8;
8124 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8126 else
8128 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 3));
8129 hint <<= 8;
8130 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 2));
8131 hint <<= 8;
8132 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 1));
8133 hint <<= 8;
8134 hint |= INTVAL (CONST_VECTOR_ELT (operands[1], 0));
8137 operands[1] = GEN_INT (hint);
8138 break;
8140 default:
8141 abort ();
8143 output_mov_immediate (operands);
8144 output_mov_immediate (otherops);
8146 else if (code1 == CONST_DOUBLE)
8148 if (GET_MODE (operands[1]) == DFmode)
8150 REAL_VALUE_TYPE r;
8151 long l[2];
8153 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
8154 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
8155 otherops[1] = GEN_INT (l[1]);
8156 operands[1] = GEN_INT (l[0]);
8158 else if (GET_MODE (operands[1]) != VOIDmode)
8159 abort ();
8160 else if (WORDS_BIG_ENDIAN)
8162 otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8163 operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8165 else
8167 otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
8168 operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
8171 output_mov_immediate (operands);
8172 output_mov_immediate (otherops);
8174 else if (code1 == CONST_INT)
8176 #if HOST_BITS_PER_WIDE_INT > 32
8177 /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
8178 what the upper word is. */
8179 if (WORDS_BIG_ENDIAN)
8181 otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8182 operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8184 else
8186 otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
8187 operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
8189 #else
8190 /* Sign extend the intval into the high-order word. */
8191 if (WORDS_BIG_ENDIAN)
8193 otherops[1] = operands[1];
8194 operands[1] = (INTVAL (operands[1]) < 0
8195 ? constm1_rtx : const0_rtx);
8197 else
8198 otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
8199 #endif
8200 output_mov_immediate (otherops);
8201 output_mov_immediate (operands);
8203 else if (code1 == MEM)
8205 switch (GET_CODE (XEXP (operands[1], 0)))
8207 case REG:
8208 output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
8209 break;
8211 case PRE_INC:
8212 abort (); /* Should never happen now. */
8213 break;
8215 case PRE_DEC:
8216 output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
8217 break;
8219 case POST_INC:
8220 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
8221 break;
8223 case POST_DEC:
8224 abort (); /* Should never happen now. */
8225 break;
8227 case LABEL_REF:
8228 case CONST:
8229 output_asm_insn ("adr%?\t%0, %1", operands);
8230 output_asm_insn ("ldm%?ia\t%0, %M0", operands);
8231 break;
8233 default:
8234 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
8235 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
8237 otherops[0] = operands[0];
8238 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
8239 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
8241 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
8243 if (GET_CODE (otherops[2]) == CONST_INT)
8245 switch ((int) INTVAL (otherops[2]))
8247 case -8:
8248 output_asm_insn ("ldm%?db\t%1, %M0", otherops);
8249 return "";
8250 case -4:
8251 output_asm_insn ("ldm%?da\t%1, %M0", otherops);
8252 return "";
8253 case 4:
8254 output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
8255 return "";
8258 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
8259 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
8260 else
8261 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8263 else
8264 output_asm_insn ("add%?\t%0, %1, %2", otherops);
8266 else
8267 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
8269 return "ldm%?ia\t%0, %M0";
8271 else
8273 otherops[1] = adjust_address (operands[1], SImode, 4);
8274 /* Take care of overlapping base/data reg. */
8275 if (reg_mentioned_p (operands[0], operands[1]))
8277 output_asm_insn ("ldr%?\t%0, %1", otherops);
8278 output_asm_insn ("ldr%?\t%0, %1", operands);
8280 else
8282 output_asm_insn ("ldr%?\t%0, %1", operands);
8283 output_asm_insn ("ldr%?\t%0, %1", otherops);
8288 else
8289 abort (); /* Constraints should prevent this. */
8291 else if (code0 == MEM && code1 == REG)
8293 if (REGNO (operands[1]) == IP_REGNUM)
8294 abort ();
8296 switch (GET_CODE (XEXP (operands[0], 0)))
8298 case REG:
8299 output_asm_insn ("stm%?ia\t%m0, %M1", operands);
8300 break;
8302 case PRE_INC:
8303 abort (); /* Should never happen now. */
8304 break;
8306 case PRE_DEC:
8307 output_asm_insn ("stm%?db\t%m0!, %M1", operands);
8308 break;
8310 case POST_INC:
8311 output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
8312 break;
8314 case POST_DEC:
8315 abort (); /* Should never happen now. */
8316 break;
8318 case PLUS:
8319 if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
8321 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
8323 case -8:
8324 output_asm_insn ("stm%?db\t%m0, %M1", operands);
8325 return "";
8327 case -4:
8328 output_asm_insn ("stm%?da\t%m0, %M1", operands);
8329 return "";
8331 case 4:
8332 output_asm_insn ("stm%?ib\t%m0, %M1", operands);
8333 return "";
8336 /* Fall through */
8338 default:
8339 otherops[0] = adjust_address (operands[0], SImode, 4);
8340 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
8341 output_asm_insn ("str%?\t%1, %0", operands);
8342 output_asm_insn ("str%?\t%1, %0", otherops);
8345 else
8346 /* Constraints should prevent this. */
8347 abort ();
8349 return "";
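/* [Editor's illustration -- not part of arm.c.]  A hedged sketch of the
   addressing trick used in the constant-offset PLUS cases above: a
   double-word access at a small offset from a base register can use a
   single LDM/STM, because the block-transfer addressing modes implicitly
   adjust the base address:

     offset -8  ->  ldm%?db rn, {rd, rd+1}   (decrement before)
     offset -4  ->  ldm%?da rn, {rd, rd+1}   (decrement after)
     offset  4  ->  ldm%?ib rn, {rd, rd+1}   (increment before)

   Any other offset is first materialised into the destination register
   with an add/sub, and the LDM is then done relative to that.  */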
8353 /* Output an arbitrary MOV reg, #n.
8354 OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
8355 const char *
8356 output_mov_immediate (rtx *operands)
8358 HOST_WIDE_INT n = INTVAL (operands[1]);
8360 /* Try to use one MOV. */
8361 if (const_ok_for_arm (n))
8362 output_asm_insn ("mov%?\t%0, %1", operands);
8364 /* Try to use one MVN. */
8365 else if (const_ok_for_arm (~n))
8367 operands[1] = GEN_INT (~n);
8368 output_asm_insn ("mvn%?\t%0, %1", operands);
8370 else
8372 int n_ones = 0;
8373 int i;
8375 /* If all else fails, make it out of ORRs or BICs as appropriate. */
8376 for (i = 0; i < 32; i++)
8377 if (n & 1 << i)
8378 n_ones++;
8380 if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
8381 output_multi_immediate (operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1, ~ n);
8382 else
8383 output_multi_immediate (operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1, n);
8386 return "";
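/* [Editor's sketch -- not part of arm.c.]  A minimal standalone model of
   the test that const_ok_for_arm performs, assuming the classic ARM
   immediate encoding: a value is a valid data-processing immediate if it
   is an 8-bit constant rotated right by an even amount.  The function
   name is hypothetical; the real test lives in const_ok_for_arm.  */
#include <stdint.h>

static int
sketch_const_ok_for_arm (uint32_t n)
{
  int rot;

  /* Try every even rotation; undo it with a left rotation and see
     whether the remaining value fits in eight bits.  */
  for (rot = 0; rot < 32; rot += 2)
    {
      uint32_t undone = (n << rot) | (n >> ((32 - rot) & 31));

      if ((undone & ~(uint32_t) 0xff) == 0)
	return 1;
    }

  return 0;
}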
8389 /* Output an ADD r, s, #n where n may be too big for one instruction.
8390 If N is zero and the source and destination registers are the same, output nothing. */
8391 const char *
8392 output_add_immediate (rtx *operands)
8394 HOST_WIDE_INT n = INTVAL (operands[2]);
8396 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
8398 if (n < 0)
8399 output_multi_immediate (operands,
8400 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
8401 -n);
8402 else
8403 output_multi_immediate (operands,
8404 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
8405 n);
8408 return "";
8411 /* Output a multiple immediate operation.
8412 OPERANDS is the vector of operands referred to in the output patterns.
8413 INSTR1 is the output pattern to use for the first constant.
8414 INSTR2 is the output pattern to use for subsequent constants.
8415 IMMED_OP is the index of the constant slot in OPERANDS.
8416 N is the constant value. */
8417 static const char *
8418 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
8419 int immed_op, HOST_WIDE_INT n)
8421 #if HOST_BITS_PER_WIDE_INT > 32
8422 n &= 0xffffffff;
8423 #endif
8425 if (n == 0)
8427 /* Quick and easy output. */
8428 operands[immed_op] = const0_rtx;
8429 output_asm_insn (instr1, operands);
8431 else
8433 int i;
8434 const char * instr = instr1;
8436 /* Note that n is never zero here (which would give no output). */
8437 for (i = 0; i < 32; i += 2)
8439 if (n & (3 << i))
8441 operands[immed_op] = GEN_INT (n & (255 << i));
8442 output_asm_insn (instr, operands);
8443 instr = instr2;
8444 i += 6;
8449 return "";
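/* [Editor's sketch -- not part of arm.c.]  A standalone uint32_t trace of
   the chunking loop above (the real code works on HOST_WIDE_INT masked to
   32 bits).  The constant is consumed eight bits at a time, each chunk
   anchored at an even bit position so it is encodable as a rotated
   immediate.  Splitting 0x12345678, for example, yields the chunks
   0x00000278, 0x00005400, 0x02340000 and 0x10000000 (lowest first), i.e.
   one MOV plus three ORRs.  */
#include <stdio.h>
#include <stdint.h>

static void
sketch_split_immediate (uint32_t n)
{
  int i;

  for (i = 0; i < 32; i += 2)
    if (n & ((uint32_t) 3 << i))
      {
	printf ("chunk: 0x%08x\n", (unsigned int) (n & ((uint32_t) 255 << i)));
	i += 6;	/* The chunk covers bits i..i+7; with the loop's own
		   increment this advances a full eight bits.  */
      }
}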
8452 /* Return the appropriate ARM instruction for the operation code.
8453 The returned result should not be overwritten. OP is the rtx of the
8454 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8455 was shifted. */
8456 const char *
8457 arithmetic_instr (rtx op, int shift_first_arg)
8459 switch (GET_CODE (op))
8461 case PLUS:
8462 return "add";
8464 case MINUS:
8465 return shift_first_arg ? "rsb" : "sub";
8467 case IOR:
8468 return "orr";
8470 case XOR:
8471 return "eor";
8473 case AND:
8474 return "and";
8476 default:
8477 abort ();
8481 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8482 for the operation code. The returned result should not be overwritten.
8483 OP is the rtx code of the shift.
8484 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
8485 constant shift amount otherwise. */
8486 static const char *
8487 shift_op (rtx op, HOST_WIDE_INT *amountp)
8489 const char * mnem;
8490 enum rtx_code code = GET_CODE (op);
8492 if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
8493 *amountp = -1;
8494 else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
8495 *amountp = INTVAL (XEXP (op, 1));
8496 else
8497 abort ();
8499 switch (code)
8501 case ASHIFT:
8502 mnem = "asl";
8503 break;
8505 case ASHIFTRT:
8506 mnem = "asr";
8507 break;
8509 case LSHIFTRT:
8510 mnem = "lsr";
8511 break;
8513 case ROTATERT:
8514 mnem = "ror";
8515 break;
8517 case MULT:
8518 /* We never have to worry about the amount being other than a
8519 power of 2, since this case can never be reloaded from a reg. */
8520 if (*amountp != -1)
8521 *amountp = int_log2 (*amountp);
8522 else
8523 abort ();
8524 return "asl";
8526 default:
8527 abort ();
8530 if (*amountp != -1)
8532 /* This is not 100% correct, but follows from the desire to merge
8533 multiplication by a power of 2 with the recognizer for a
8534 shift. >=32 is not a valid shift for "asl", so we must try and
8535 output a shift that produces the correct arithmetical result.
8536 Using lsr #32 is identical except for the fact that the carry bit
8537 is not set correctly if we set the flags; but we never use the
8538 carry bit from such an operation, so we can ignore that. */
8539 if (code == ROTATERT)
8540 /* Rotate is just modulo 32. */
8541 *amountp &= 31;
8542 else if (*amountp != (*amountp & 31))
8544 if (code == ASHIFT)
8545 mnem = "lsr";
8546 *amountp = 32;
8549 /* Shifts of 0 are no-ops. */
8550 if (*amountp == 0)
8551 return NULL;
8554 return mnem;
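/* [Editor's illustration -- not part of arm.c.]  Hedged examples of the
   mapping performed above, assuming ARM shift semantics:

     (ashift x 3)     ->  "asl", amount 3
     (mult x 8)       ->  "asl", amount int_log2 (8) = 3
     (ashift x 33)    ->  "lsr", amount 32   (arithmetically zero, and
                                              lsr #32 also yields zero)
     (rotatert x 33)  ->  "ror", amount 1    (rotation is modulo 32)
     (ashift x 0)     ->  NULL               (a no-op; no shift emitted)  */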
8557 /* Obtain the shift count from POWER, which must be a power of two. */
8559 static HOST_WIDE_INT
8560 int_log2 (HOST_WIDE_INT power)
8562 HOST_WIDE_INT shift = 0;
8564 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
8566 if (shift > 31)
8567 abort ();
8568 shift++;
8571 return shift;
8574 /* Output a .ascii pseudo-op, keeping track of lengths. This is because
8575 /bin/as is horribly restrictive. */
8576 #define MAX_ASCII_LEN 51
8578 void
8579 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
8581 int i;
8582 int len_so_far = 0;
8584 fputs ("\t.ascii\t\"", stream);
8586 for (i = 0; i < len; i++)
8588 int c = p[i];
8590 if (len_so_far >= MAX_ASCII_LEN)
8592 fputs ("\"\n\t.ascii\t\"", stream);
8593 len_so_far = 0;
8596 switch (c)
8598 case TARGET_TAB:
8599 fputs ("\\t", stream);
8600 len_so_far += 2;
8601 break;
8603 case TARGET_FF:
8604 fputs ("\\f", stream);
8605 len_so_far += 2;
8606 break;
8608 case TARGET_BS:
8609 fputs ("\\b", stream);
8610 len_so_far += 2;
8611 break;
8613 case TARGET_CR:
8614 fputs ("\\r", stream);
8615 len_so_far += 2;
8616 break;
8618 case TARGET_NEWLINE:
8619 fputs ("\\n", stream);
8620 c = p [i + 1];
8621 if ((c >= ' ' && c <= '~')
8622 || c == TARGET_TAB)
8623 /* This is a good place for a line break. */
8624 len_so_far = MAX_ASCII_LEN;
8625 else
8626 len_so_far += 2;
8627 break;
8629 case '\"':
8630 case '\\':
8631 putc ('\\', stream);
8632 len_so_far++;
8633 /* Drop through. */
8635 default:
8636 if (c >= ' ' && c <= '~')
8638 putc (c, stream);
8639 len_so_far++;
8641 else
8643 fprintf (stream, "\\%03o", c);
8644 len_so_far += 4;
8646 break;
8650 fputs ("\"\n", stream);
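/* [Editor's illustration -- not part of arm.c.]  A hedged example of the
   output produced above, assuming the input bytes "ab\ncd": the newline
   is escaped, and because the following character is printable the
   directive is deliberately broken there, giving

	.ascii	"ab\n"
	.ascii	"cd"

   Long runs without newlines are instead broken whenever the current
   directive reaches MAX_ASCII_LEN characters.  */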
8653 /* Compute the register save mask for registers 0 through 12
8654 inclusive. This code is used by both arm_compute_save_reg_mask
8655 and arm_compute_initial_elimination_offset. */
8656 static unsigned long
8657 arm_compute_save_reg0_reg12_mask (void)
8659 unsigned long func_type = arm_current_func_type ();
8660 unsigned int save_reg_mask = 0;
8661 unsigned int reg;
8663 if (IS_INTERRUPT (func_type))
8665 unsigned int max_reg;
8666 /* Interrupt functions must not corrupt any registers,
8667 even call clobbered ones. If this is a leaf function
8668 we can just examine the registers used by the RTL, but
8669 otherwise we have to assume that whatever function is
8670 called might clobber anything, and so we have to save
8671 all the call-clobbered registers as well. */
8672 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
8673 /* FIQ handlers have registers r8 - r12 banked, so
8674 we only need to check r0 - r7. Normal ISRs only
8675 bank r14 and r15, so we must check up to r12.
8676 r13 is the stack pointer, which is always preserved,
8677 so we do not need to consider it here. */
8678 max_reg = 7;
8679 else
8680 max_reg = 12;
8682 for (reg = 0; reg <= max_reg; reg++)
8683 if (regs_ever_live[reg]
8684 || (! current_function_is_leaf && call_used_regs [reg]))
8685 save_reg_mask |= (1 << reg);
8687 else
8689 /* In the normal case we only need to save those registers
8690 which are call saved and which are used by this function. */
8691 for (reg = 0; reg <= 10; reg++)
8692 if (regs_ever_live[reg] && ! call_used_regs [reg])
8693 save_reg_mask |= (1 << reg);
8695 /* Handle the frame pointer as a special case. */
8696 if (! TARGET_APCS_FRAME
8697 && ! frame_pointer_needed
8698 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
8699 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
8700 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
8702 /* If we aren't loading the PIC register,
8703 don't stack it even though it may be live. */
8704 if (flag_pic
8705 && ! TARGET_SINGLE_PIC_BASE
8706 && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
8707 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
8710 return save_reg_mask;
8713 /* Compute a bit mask of which registers need to be
8714 saved on the stack for the current function. */
8716 static unsigned long
8717 arm_compute_save_reg_mask (void)
8719 unsigned int save_reg_mask = 0;
8720 unsigned long func_type = arm_current_func_type ();
8722 if (IS_NAKED (func_type))
8723 /* This should never really happen. */
8724 return 0;
8726 /* If we are creating a stack frame, then we must save the frame pointer,
8727 IP (which will hold the old stack pointer), LR and the PC. */
8728 if (frame_pointer_needed)
8729 save_reg_mask |=
8730 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
8731 | (1 << IP_REGNUM)
8732 | (1 << LR_REGNUM)
8733 | (1 << PC_REGNUM);
8735 /* Volatile functions do not return, so there
8736 is no need to save any other registers. */
8737 if (IS_VOLATILE (func_type))
8738 return save_reg_mask;
8740 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
8742 /* Decide if we need to save the link register.
8743 Interrupt routines have their own banked link register,
8744 so they never need to save it.
8745 Otherwise if we do not use the link register we do not need to save
8746 it. If we are pushing other registers onto the stack however, we
8747 can save an instruction in the epilogue by pushing the link register
8748 now and then popping it back into the PC. This incurs extra memory
8749 accesses though, so we only do it when optimizing for size, and only
8750 if we know that we will not need a fancy return sequence. */
8751 if (regs_ever_live [LR_REGNUM]
8752 || (save_reg_mask
8753 && optimize_size
8754 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL))
8755 save_reg_mask |= 1 << LR_REGNUM;
8757 if (cfun->machine->lr_save_eliminated)
8758 save_reg_mask &= ~ (1 << LR_REGNUM);
8760 if (TARGET_REALLY_IWMMXT
8761 && ((bit_count (save_reg_mask)
8762 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
8764 unsigned int reg;
8766 /* The total number of registers that are going to be pushed
8767 onto the stack is odd. We need to ensure that the stack
8768 is 64-bit aligned before we start to save iWMMXt registers,
8769 and also before we start to create locals. (A local variable
8770 might be a double or long long which we will load/store using
8771 an iWMMXt instruction). Therefore we need to push another
8772 ARM register, so that the stack will be 64-bit aligned. We
8773 try to avoid using the arg registers (r0 - r3) as they might be
8774 used to pass values in a tail call. */
8775 for (reg = 4; reg <= 12; reg++)
8776 if ((save_reg_mask & (1 << reg)) == 0)
8777 break;
8779 if (reg <= 12)
8780 save_reg_mask |= (1 << reg);
8781 else
8783 cfun->machine->sibcall_blocked = 1;
8784 save_reg_mask |= (1 << 3);
8788 return save_reg_mask;
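/* [Editor's sketch -- not part of arm.c.]  A standalone model of the
   iWMMXt alignment rule above.  The helper names are hypothetical; the
   real logic is the TARGET_REALLY_IWMMXT block in
   arm_compute_save_reg_mask.  */
static int
sketch_bit_count (unsigned long x)
{
  int n = 0;

  while (x)
    {
      x &= x - 1;		/* Clear the lowest set bit.  */
      n++;
    }

  return n;
}

/* If an odd number of words would be pushed, claim one extra core
   register (preferring r4 - r12 over the argument registers) so the
   stack stays 64-bit aligned for the iWMMXt saves that follow.  */
static unsigned long
sketch_align_save_mask (unsigned long mask, int pretend_words)
{
  int reg;

  if ((sketch_bit_count (mask) + pretend_words) % 2 == 0)
    return mask;		/* Already 64-bit aligned.  */

  for (reg = 4; reg <= 12; reg++)
    if ((mask & (1UL << reg)) == 0)
      return mask | (1UL << reg);

  /* All of r4 - r12 are taken; fall back to r3, which in the real
     code also blocks sibling calls.  */
  return mask | (1UL << 3);
}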
8791 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
8792 everything bar the final return instruction. */
8793 const char *
8794 output_return_instruction (rtx operand, int really_return, int reverse)
8796 char conditional[10];
8797 char instr[100];
8798 int reg;
8799 unsigned long live_regs_mask;
8800 unsigned long func_type;
8802 func_type = arm_current_func_type ();
8804 if (IS_NAKED (func_type))
8805 return "";
8807 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
8809 /* If this function was declared non-returning, and we have
8810 found a tail call, then we have to trust that the called
8811 function won't return. */
8812 if (really_return)
8814 rtx ops[2];
8816 /* Otherwise, trap an attempted return by aborting. */
8817 ops[0] = operand;
8818 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
8819 : "abort");
8820 assemble_external_libcall (ops[1]);
8821 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
8824 return "";
8827 if (current_function_calls_alloca && !really_return)
8828 abort ();
8830 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
8832 return_used_this_function = 1;
8834 live_regs_mask = arm_compute_save_reg_mask ();
8836 if (live_regs_mask)
8838 const char * return_reg;
8840 /* If we do not have any special requirements for function exit
8841 (e.g. interworking, or an ISR) then we can load the return address
8842 directly into the PC. Otherwise we must load it into LR. */
8843 if (really_return
8844 && ! TARGET_INTERWORK)
8845 return_reg = reg_names[PC_REGNUM];
8846 else
8847 return_reg = reg_names[LR_REGNUM];
8849 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
8851 /* There are three possible reasons for the IP register
8852 being saved. 1) a stack frame was created, in which case
8853 IP contains the old stack pointer, or 2) an ISR routine
8854 corrupted it, or 3) it was saved to align the stack on
8855 iWMMXt. In case 1, restore IP into SP, otherwise just
8856 restore IP. */
8857 if (frame_pointer_needed)
8859 live_regs_mask &= ~ (1 << IP_REGNUM);
8860 live_regs_mask |= (1 << SP_REGNUM);
8862 else
8864 if (! IS_INTERRUPT (func_type)
8865 && ! TARGET_REALLY_IWMMXT)
8866 abort ();
8870 /* On some ARM architectures it is faster to use LDR rather than
8871 LDM to load a single register. On other architectures, the
8872 cost is the same. In 26 bit mode, or for exception handlers,
8873 we have to use LDM to load the PC so that the CPSR is also
8874 restored. */
8875 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
8877 if (live_regs_mask == (unsigned int)(1 << reg))
8878 break;
8880 if (reg <= LAST_ARM_REGNUM
8881 && (reg != LR_REGNUM
8882 || ! really_return
8883 || (TARGET_APCS_32 && ! IS_INTERRUPT (func_type))))
8885 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
8886 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
8888 else
8890 char *p;
8891 int first = 1;
8893 /* Generate the load multiple instruction to restore the
8894 registers. Note we can get here, even if
8895 frame_pointer_needed is true, but only if sp already
8896 points to the base of the saved core registers. */
8897 if (live_regs_mask & (1 << SP_REGNUM))
8899 unsigned HOST_WIDE_INT stack_adjust =
8900 arm_get_frame_size () + current_function_outgoing_args_size;
8902 if (stack_adjust != 0 && stack_adjust != 4)
8903 abort ();
8905 if (stack_adjust && arm_arch5)
8906 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
8907 else
8909 /* If we can't use ldmib (SA110 bug), then try to pop r3
8910 instead. */
8911 if (stack_adjust)
8912 live_regs_mask |= 1 << 3;
8913 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
8916 else
8917 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
8919 p = instr + strlen (instr);
8921 for (reg = 0; reg <= SP_REGNUM; reg++)
8922 if (live_regs_mask & (1 << reg))
8924 int l = strlen (reg_names[reg]);
8926 if (first)
8927 first = 0;
8928 else
8930 memcpy (p, ", ", 2);
8931 p += 2;
8934 memcpy (p, "%|", 2);
8935 memcpy (p + 2, reg_names[reg], l);
8936 p += l + 2;
8939 if (live_regs_mask & (1 << LR_REGNUM))
8941 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
8942 /* Decide if we need to add the ^ symbol to the end of the
8943 register list. This causes the saved condition codes
8944 register to be copied into the current condition codes
8945 register. We do the copy if we are conforming to the 32-bit
8946 ABI and this is an interrupt function, or if we are
8947 conforming to the 26-bit ABI. There is a special case for
8948 the 26-bit ABI however, which is if we are writing back the
8949 stack pointer but not loading the PC. In this case adding
8950 the ^ symbol would create a type 2 LDM instruction, where
8951 writeback is UNPREDICTABLE. We are safe in leaving the ^
8952 character off in this case however, since the actual return
8953 instruction will be a MOVS which will restore the CPSR. */
8954 if ((TARGET_APCS_32 && IS_INTERRUPT (func_type))
8955 || (! TARGET_APCS_32 && really_return))
8956 strcat (p, "^");
8958 else
8959 strcpy (p, "}");
8962 output_asm_insn (instr, & operand);
8964 /* See if we need to generate an extra instruction to
8965 perform the actual function return. */
8966 if (really_return
8967 && func_type != ARM_FT_INTERWORKED
8968 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
8970 /* The return has already been handled
8971 by loading the LR into the PC. */
8972 really_return = 0;
8976 if (really_return)
8978 switch ((int) ARM_FUNC_TYPE (func_type))
8980 case ARM_FT_ISR:
8981 case ARM_FT_FIQ:
8982 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
8983 break;
8985 case ARM_FT_INTERWORKED:
8986 sprintf (instr, "bx%s\t%%|lr", conditional);
8987 break;
8989 case ARM_FT_EXCEPTION:
8990 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
8991 break;
8993 default:
8994 /* ARMv5 implementations always provide BX, so interworking
8995 is the default unless APCS-26 is in use. */
8996 if ((insn_flags & FL_ARCH5) != 0 && TARGET_APCS_32)
8997 sprintf (instr, "bx%s\t%%|lr", conditional);
8998 else
8999 sprintf (instr, "mov%s%s\t%%|pc, %%|lr",
9000 conditional, TARGET_APCS_32 ? "" : "s");
9001 break;
9004 output_asm_insn (instr, & operand);
9007 return "";
9010 /* Write the function name into the code section, directly preceding
9011 the function prologue.
9013 Code will be output similar to this:
9014 t0
9015 .ascii "arm_poke_function_name", 0
9016 .align
9017 t1
9018 .word 0xff000000 + (t1 - t0)
9019 arm_poke_function_name
9020 mov ip, sp
9021 stmfd sp!, {fp, ip, lr, pc}
9022 sub fp, ip, #4
9024 When performing a stack backtrace, code can inspect the value
9025 of 'pc' stored at 'fp' + 0. If the trace function then looks
9026 at location pc - 12 and the top 8 bits are set, then we know
9027 that there is a function name embedded immediately preceding this
9028 location, whose length is given by (pc[-3] & ~0xff000000).
9030 We assume that pc is declared as a pointer to an unsigned long.
9032 It is of no benefit to output the function name if we are assembling
9033 a leaf function. These function types will not contain a stack
9034 backtrace structure, therefore it is not possible to determine the
9035 function name. */
9036 void
9037 arm_poke_function_name (FILE *stream, const char *name)
9039 unsigned long alignlength;
9040 unsigned long length;
9041 rtx x;
9043 length = strlen (name) + 1;
9044 alignlength = ROUND_UP_WORD (length);
9046 ASM_OUTPUT_ASCII (stream, name, length);
9047 ASM_OUTPUT_ALIGN (stream, 2);
9048 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
9049 assemble_aligned_integer (UNITS_PER_WORD, x);
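/* [Editor's sketch -- not part of arm.c.]  How a backtrace routine might
   recover the embedded name, following the layout documented above.  PC
   is the code address saved in the frame; the function name here is
   hypothetical, and the marker check mirrors the 0xff000000 tag emitted
   by arm_poke_function_name.  */
static const char *
sketch_find_function_name (const unsigned long *pc)
{
  unsigned long marker = pc[-3];	/* The word at pc - 12.  */

  if ((marker & 0xff000000) == 0xff000000)
    /* The low bits hold the padded length of the name, which sits
       immediately before the marker word.  */
    return (const char *) (pc - 3) - (marker & ~0xff000000UL);

  return 0;
}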
9052 /* Place some comments into the assembler stream
9053 describing the current function. */
9054 static void
9055 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
9057 unsigned long func_type;
9059 if (!TARGET_ARM)
9061 thumb_output_function_prologue (f, frame_size);
9062 return;
9065 /* Sanity check. */
9066 if (arm_ccfsm_state || arm_target_insn)
9067 abort ();
9069 func_type = arm_current_func_type ();
9071 switch ((int) ARM_FUNC_TYPE (func_type))
9073 default:
9074 case ARM_FT_NORMAL:
9075 break;
9076 case ARM_FT_INTERWORKED:
9077 asm_fprintf (f, "\t%@ Function supports interworking.\n");
9078 break;
9079 case ARM_FT_EXCEPTION_HANDLER:
9080 asm_fprintf (f, "\t%@ C++ Exception Handler.\n");
9081 break;
9082 case ARM_FT_ISR:
9083 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
9084 break;
9085 case ARM_FT_FIQ:
9086 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
9087 break;
9088 case ARM_FT_EXCEPTION:
9089 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
9090 break;
9093 if (IS_NAKED (func_type))
9094 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9096 if (IS_VOLATILE (func_type))
9097 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
9099 if (IS_NESTED (func_type))
9100 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
9102 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9103 current_function_args_size,
9104 current_function_pretend_args_size, frame_size);
9106 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9107 frame_pointer_needed,
9108 cfun->machine->uses_anonymous_args);
9110 if (cfun->machine->lr_save_eliminated)
9111 asm_fprintf (f, "\t%@ link register save eliminated.\n");
9113 #ifdef AOF_ASSEMBLER
9114 if (flag_pic)
9115 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
9116 #endif
9118 return_used_this_function = 0;
9121 const char *
9122 arm_output_epilogue (rtx sibling)
9124 int reg;
9125 unsigned long saved_regs_mask;
9126 unsigned long func_type;
9127 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9128 frame that is $fp + 4 for a non-variadic function. */
9129 int floats_offset = 0;
9130 rtx operands[3];
9131 int frame_size = arm_get_frame_size ();
9132 FILE * f = asm_out_file;
9133 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
9134 unsigned int lrm_count = 0;
9135 int really_return = (sibling == NULL);
9136 int start_reg;
9138 /* If we have already generated the return instruction
9139 then it is futile to generate anything else. */
9140 if (use_return_insn (FALSE, sibling) && return_used_this_function)
9141 return "";
9143 func_type = arm_current_func_type ();
9145 if (IS_NAKED (func_type))
9146 /* Naked functions don't have epilogues. */
9147 return "";
9149 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
9151 rtx op;
9153 /* A volatile function should never return. Call abort. */
9154 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
9155 assemble_external_libcall (op);
9156 output_asm_insn ("bl\t%a0", &op);
9158 return "";
9161 if (ARM_FUNC_TYPE (func_type) == ARM_FT_EXCEPTION_HANDLER
9162 && ! really_return)
9163 /* If we are throwing an exception, then we really must
9164 be doing a return, so we can't tail-call. */
9165 abort ();
9167 saved_regs_mask = arm_compute_save_reg_mask ();
9169 if (TARGET_IWMMXT)
9170 lrm_count = bit_count (saved_regs_mask);
9172 /* XXX We should adjust floats_offset for any anonymous args, and then
9173 re-adjust vfp_offset below to compensate. */
9175 /* Compute how far away the floats will be. */
9176 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
9177 if (saved_regs_mask & (1 << reg))
9178 floats_offset += 4;
9180 if (frame_pointer_needed)
9182 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9183 int vfp_offset = 4;
9185 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9187 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9188 if (regs_ever_live[reg] && !call_used_regs[reg])
9190 floats_offset += 12;
9191 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
9192 reg, FP_REGNUM, floats_offset - vfp_offset);
9195 else
9197 start_reg = LAST_FPA_REGNUM;
9199 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
9201 if (regs_ever_live[reg] && !call_used_regs[reg])
9203 floats_offset += 12;
9205 /* We can't unstack more than four registers at once. */
9206 if (start_reg - reg == 3)
9208 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
9209 reg, FP_REGNUM, floats_offset - vfp_offset);
9210 start_reg = reg - 1;
9213 else
9215 if (reg != start_reg)
9216 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9217 reg + 1, start_reg - reg,
9218 FP_REGNUM, floats_offset - vfp_offset);
9219 start_reg = reg - 1;
9223 /* Just in case the last register checked also needs unstacking. */
9224 if (reg != start_reg)
9225 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
9226 reg + 1, start_reg - reg,
9227 FP_REGNUM, floats_offset - vfp_offset);
9230 if (TARGET_HARD_FLOAT && TARGET_VFP)
9232 int nregs = 0;
9234 /* We save regs in pairs. */
9235 /* A special insn for saving/restoring VFP registers. This does
9236 not have base+offset addressing modes, so we use IP to
9237 hold the address. Each block requires nregs*2+1 words. */
9238 start_reg = FIRST_VFP_REGNUM;
9239 /* Count how many blocks of registers need saving. */
9240 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9242 if ((!regs_ever_live[reg] || call_used_regs[reg])
9243 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9245 if (start_reg != reg)
9246 floats_offset += 4;
9247 start_reg = reg + 2;
9249 else
9251 floats_offset += 8;
9252 nregs++;
9255 if (start_reg != reg)
9256 floats_offset += 4;
9258 if (nregs > 0)
9260 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
9261 FP_REGNUM, floats_offset - vfp_offset);
9263 start_reg = FIRST_VFP_REGNUM;
9264 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9266 if ((!regs_ever_live[reg] || call_used_regs[reg])
9267 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9269 if (start_reg != reg)
9271 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9272 (start_reg - FIRST_VFP_REGNUM) / 2,
9273 (reg - start_reg) / 2);
9275 start_reg = reg + 2;
9278 if (start_reg != reg)
9280 vfp_print_multi (f, "fldmfdx\t%r!", IP_REGNUM, "d%d",
9281 (start_reg - FIRST_VFP_REGNUM) / 2,
9282 (reg - start_reg) / 2);
9286 if (TARGET_IWMMXT)
9288 /* The frame pointer is guaranteed to be non-double-word aligned.
9289 This is because it is set to (old_stack_pointer - 4) and the
9290 old_stack_pointer was double word aligned. Thus the offset to
9291 the iWMMXt registers to be loaded must also be non-double-word
9292 sized, so that the resultant address *is* double-word aligned.
9293 We can ignore floats_offset since that was already included in
9294 the live_regs_mask. */
9295 lrm_count += (lrm_count % 2 ? 2 : 1);
9297 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9298 if (regs_ever_live[reg] && !call_used_regs[reg])
9300 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
9301 reg, FP_REGNUM, lrm_count * 4);
9302 lrm_count += 2;
9306 /* saved_regs_mask should contain the IP, which at the time of stack
9307 frame generation actually contains the old stack pointer. So a
9308 quick way to unwind the stack is just to pop the IP register directly
9309 into the stack pointer. */
9310 if ((saved_regs_mask & (1 << IP_REGNUM)) == 0)
9311 abort ();
9312 saved_regs_mask &= ~ (1 << IP_REGNUM);
9313 saved_regs_mask |= (1 << SP_REGNUM);
9315 /* There are two registers left in saved_regs_mask - LR and PC. We
9316 only need to restore the LR register (the return address), but to
9317 save time we can load it directly into the PC, unless we need a
9318 special function exit sequence, or we are not really returning. */
9319 if (really_return && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
9320 /* Delete the LR from the register mask, so that the saved LR on
9321 the stack is loaded into the PC instead. */
9322 saved_regs_mask &= ~ (1 << LR_REGNUM);
9323 else
9324 saved_regs_mask &= ~ (1 << PC_REGNUM);
9326 /* We must use SP as the base register, because SP is one of the
9327 registers being restored. If an interrupt or page fault
9328 happens in the ldm instruction, the SP might or might not
9329 have been restored. That would be bad, as then SP will no
9330 longer indicate the safe area of stack, and we can get stack
9331 corruption. Using SP as the base register means that it will
9332 be reset correctly to the original value, should an interrupt
9333 occur. If the stack pointer already points at the right
9334 place, then omit the subtraction. */
9335 if (((frame_size + current_function_outgoing_args_size + floats_offset)
9336 != 4 * (1 + (int) bit_count (saved_regs_mask)))
9337 || current_function_calls_alloca)
9338 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
9339 4 * bit_count (saved_regs_mask));
9340 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9342 if (IS_INTERRUPT (func_type))
9343 /* Interrupt handlers will have pushed the
9344 IP onto the stack, so restore it now. */
9345 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);
9347 else
9349 /* Restore stack pointer if necessary. */
9350 if (frame_size + current_function_outgoing_args_size != 0)
9352 operands[0] = operands[1] = stack_pointer_rtx;
9353 operands[2] = GEN_INT (frame_size
9354 + current_function_outgoing_args_size);
9355 output_add_immediate (operands);
9358 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
9360 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9361 if (regs_ever_live[reg] && !call_used_regs[reg])
9362 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
9363 reg, SP_REGNUM);
9365 else
9367 start_reg = FIRST_FPA_REGNUM;
9369 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9371 if (regs_ever_live[reg] && !call_used_regs[reg])
9373 if (reg - start_reg == 3)
9375 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
9376 start_reg, SP_REGNUM);
9377 start_reg = reg + 1;
9380 else
9382 if (reg != start_reg)
9383 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9384 start_reg, reg - start_reg,
9385 SP_REGNUM);
9387 start_reg = reg + 1;
9391 /* Just in case the last register checked also needs unstacking. */
9392 if (reg != start_reg)
9393 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
9394 start_reg, reg - start_reg, SP_REGNUM);
9397 if (TARGET_HARD_FLOAT && TARGET_VFP)
9399 start_reg = FIRST_VFP_REGNUM;
9400 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9402 if ((!regs_ever_live[reg] || call_used_regs[reg])
9403 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
9405 if (start_reg != reg)
9407 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9408 (start_reg - FIRST_VFP_REGNUM) / 2,
9409 (reg - start_reg) / 2);
9411 start_reg = reg + 2;
9414 if (start_reg != reg)
9416 vfp_print_multi (f, "fldmfdx\t%r!", SP_REGNUM, "d%d",
9417 (start_reg - FIRST_VFP_REGNUM) / 2,
9418 (reg - start_reg) / 2);
9421 if (TARGET_IWMMXT)
9422 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9423 if (regs_ever_live[reg] && !call_used_regs[reg])
9424 asm_fprintf (f, "\twldrd\t%r, [%r, #+8]!\n", reg, SP_REGNUM);
9426 /* If we can, restore the LR into the PC. */
9427 if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9428 && really_return
9429 && current_function_pretend_args_size == 0
9430 && saved_regs_mask & (1 << LR_REGNUM))
9432 saved_regs_mask &= ~ (1 << LR_REGNUM);
9433 saved_regs_mask |= (1 << PC_REGNUM);
9436 /* Load the registers off the stack. If we only have one register
9437 to load use the LDR instruction - it is faster. */
9438 if (saved_regs_mask == (1 << LR_REGNUM))
9440 /* The exception handler ignores the LR, so we do
9441 not really need to load it off the stack. */
9442 if (eh_ofs)
9443 asm_fprintf (f, "\tadd\t%r, %r, #4\n", SP_REGNUM, SP_REGNUM);
9444 else
9445 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
9447 else if (saved_regs_mask)
9449 if (saved_regs_mask & (1 << SP_REGNUM))
9450 /* Note - write back to the stack register is not enabled
9451 (ie "ldmfd sp!..."). We know that the stack pointer is
9452 in the list of registers and if we add writeback the
9453 instruction becomes UNPREDICTABLE. */
9454 print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
9455 else
9456 print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);
9459 if (current_function_pretend_args_size)
9461 /* Unwind the pre-pushed regs. */
9462 operands[0] = operands[1] = stack_pointer_rtx;
9463 operands[2] = GEN_INT (current_function_pretend_args_size);
9464 output_add_immediate (operands);
9468 if (! really_return
9469 || (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9470 && current_function_pretend_args_size == 0
9471 && saved_regs_mask & (1 << PC_REGNUM)))
9472 return "";
9474 /* Generate the return instruction. */
9475 switch ((int) ARM_FUNC_TYPE (func_type))
9477 case ARM_FT_EXCEPTION_HANDLER:
9478 /* Even in 26-bit mode we do a mov (rather than a movs)
9479 because we don't have the PSR bits set in the address. */
9480 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, EXCEPTION_LR_REGNUM);
9481 break;
9483 case ARM_FT_ISR:
9484 case ARM_FT_FIQ:
9485 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
9486 break;
9488 case ARM_FT_EXCEPTION:
9489 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9490 break;
9492 case ARM_FT_INTERWORKED:
9493 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
9494 break;
9496 default:
9497 if (frame_pointer_needed)
9498 /* If we used the frame pointer then the return address
9499 will have been loaded off the stack directly into the
9500 PC, so there is no need to issue a MOV instruction
9501 here. */
9502 ;
9503 else if (current_function_pretend_args_size == 0
9504 && (saved_regs_mask & (1 << LR_REGNUM)))
9505 /* Similarly we may have been able to load LR into the PC
9506 even if we did not create a stack frame. */
9507 ;
9508 else if (TARGET_APCS_32)
9509 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9510 else
9511 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
9512 break;
9515 return "";
9518 static void
9519 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9520 HOST_WIDE_INT frame_size)
9522 if (TARGET_THUMB)
9524 /* ??? Probably not safe to set this here, since it assumes that a
9525 function will be emitted as assembly immediately after we generate
9526 RTL for it. This does not happen for inline functions. */
9527 return_used_this_function = 0;
9529 else
9531 /* We need to take into account any stack-frame rounding. */
9532 frame_size = arm_get_frame_size ();
9534 if (use_return_insn (FALSE, NULL)
9535 && return_used_this_function
9536 && (frame_size + current_function_outgoing_args_size) != 0
9537 && !frame_pointer_needed)
9538 abort ();
9540 /* Reset the ARM-specific per-function variables. */
9541 after_arm_reorg = 0;
9545 /* Generate and emit an insn that we will recognize as a push_multi.
9546 Unfortunately, since this insn does not reflect very well the actual
9547 semantics of the operation, we need to annotate the insn for the benefit
9548 of DWARF2 frame unwind information. */
9549 static rtx
9550 emit_multi_reg_push (int mask)
9552 int num_regs = 0;
9553 int num_dwarf_regs;
9554 int i, j;
9555 rtx par;
9556 rtx dwarf;
9557 int dwarf_par_index;
9558 rtx tmp, reg;
9560 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9561 if (mask & (1 << i))
9562 num_regs++;
9564 if (num_regs == 0 || num_regs > 16)
9565 abort ();
9567 /* We don't record the PC in the dwarf frame information. */
9568 num_dwarf_regs = num_regs;
9569 if (mask & (1 << PC_REGNUM))
9570 num_dwarf_regs--;
9572 /* For the body of the insn we are going to generate an UNSPEC in
9573 parallel with several USEs. This allows the insn to be recognized
9574 by the push_multi pattern in the arm.md file. The insn looks
9575 something like this:
9577 (parallel [
9578 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
9579 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
9580 (use (reg:SI 11 fp))
9581 (use (reg:SI 12 ip))
9582 (use (reg:SI 14 lr))
9583 (use (reg:SI 15 pc))
9584 ])
9586 For the frame note however, we try to be more explicit and actually
9587 show each register being stored into the stack frame, plus a (single)
9588 decrement of the stack pointer. We do it this way in order to be
9589 friendly to the stack unwinding code, which only wants to see a single
9590 stack decrement per instruction. The RTL we generate for the note looks
9591 something like this:
9593 (sequence [
9594 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
9595 (set (mem:SI (reg:SI sp)) (reg:SI r4))
9596 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
9597 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
9598 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
9599 ])
9601 This sequence is used both by the code to support stack unwinding for
9602 exceptions handlers and the code to generate dwarf2 frame debugging. */
9604 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
9605 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
9606 dwarf_par_index = 1;
9608 for (i = 0; i <= LAST_ARM_REGNUM; i++)
9610 if (mask & (1 << i))
9612 reg = gen_rtx_REG (SImode, i);
9614 XVECEXP (par, 0, 0)
9615 = gen_rtx_SET (VOIDmode,
9616 gen_rtx_MEM (BLKmode,
9617 gen_rtx_PRE_DEC (BLKmode,
9618 stack_pointer_rtx)),
9619 gen_rtx_UNSPEC (BLKmode,
9620 gen_rtvec (1, reg),
9621 UNSPEC_PUSH_MULT));
9623 if (i != PC_REGNUM)
9625 tmp = gen_rtx_SET (VOIDmode,
9626 gen_rtx_MEM (SImode, stack_pointer_rtx),
9627 reg);
9628 RTX_FRAME_RELATED_P (tmp) = 1;
9629 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
9630 dwarf_par_index++;
9633 break;
9637 for (j = 1, i++; j < num_regs; i++)
9639 if (mask & (1 << i))
9641 reg = gen_rtx_REG (SImode, i);
9643 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
9645 if (i != PC_REGNUM)
9647 tmp = gen_rtx_SET (VOIDmode,
9648 gen_rtx_MEM (SImode,
9649 plus_constant (stack_pointer_rtx,
9650 4 * j)),
9651 reg);
9652 RTX_FRAME_RELATED_P (tmp) = 1;
9653 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
9656 j++;
9660 par = emit_insn (par);
9662 tmp = gen_rtx_SET (SImode,
9663 stack_pointer_rtx,
9664 gen_rtx_PLUS (SImode,
9665 stack_pointer_rtx,
9666 GEN_INT (-4 * num_regs)));
9667 RTX_FRAME_RELATED_P (tmp) = 1;
9668 XVECEXP (dwarf, 0, 0) = tmp;
9670 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9671 REG_NOTES (par));
9672 return par;
9675 static rtx
9676 emit_sfm (int base_reg, int count)
9678 rtx par;
9679 rtx dwarf;
9680 rtx tmp, reg;
9681 int i;
9683 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9684 dwarf = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
9686 reg = gen_rtx_REG (XFmode, base_reg++);
9688 XVECEXP (par, 0, 0)
9689 = gen_rtx_SET (VOIDmode,
9690 gen_rtx_MEM (BLKmode,
9691 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9692 gen_rtx_UNSPEC (BLKmode,
9693 gen_rtvec (1, reg),
9694 UNSPEC_PUSH_MULT));
9695 tmp
9696 = gen_rtx_SET (VOIDmode,
9697 gen_rtx_MEM (XFmode,
9698 gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
9699 reg);
9700 RTX_FRAME_RELATED_P (tmp) = 1;
9701 XVECEXP (dwarf, 0, count - 1) = tmp;
9703 for (i = 1; i < count; i++)
9705 reg = gen_rtx_REG (XFmode, base_reg++);
9706 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
9708 tmp = gen_rtx_SET (VOIDmode,
9709 gen_rtx_MEM (XFmode,
9710 gen_rtx_PRE_DEC (BLKmode,
9711 stack_pointer_rtx)),
9712 reg);
9713 RTX_FRAME_RELATED_P (tmp) = 1;
9714 XVECEXP (dwarf, 0, count - i - 1) = tmp;
9717 par = emit_insn (par);
9718 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
9719 REG_NOTES (par));
9720 return par;
9724 /* Compute the distance from register FROM to register TO.
9725 These can be the arg pointer (26), the soft frame pointer (25),
9726 the stack pointer (13) or the hard frame pointer (11).
9727 Typical stack layout looks like this:
9729 old stack pointer -> | |
9730 ----
9731 | | \
9732 | | saved arguments for
9733 | | vararg functions
9734 | | /
9736 hard FP & arg pointer -> | | \
9737 | | stack
9738 | | frame
9739 | | /
9741 | | \
9742 | | call saved
9743 | | registers
9744 soft frame pointer -> | | /
9746 | | \
9747 | | local
9748 | | variables
9749 | | /
9751 | | \
9752 | | outgoing
9753 | | arguments
9754 current stack pointer -> | | /
9757 For a given function some or all of these stack components
9758 may not be needed, giving rise to the possibility of
9759 eliminating some of the registers.
9761 The values returned by this function must reflect the behavior
9762 of arm_expand_prologue() and arm_compute_save_reg_mask().
9764 The sign of the number returned reflects the direction of stack
9765 growth, so the values are positive for all eliminations except
9766 from the soft frame pointer to the hard frame pointer. */
9767 unsigned int
9768 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
9770 unsigned int local_vars = arm_get_frame_size ();
9771 unsigned int outgoing_args = current_function_outgoing_args_size;
9772 unsigned int stack_frame;
9773 unsigned int call_saved_registers;
9774 unsigned long func_type;
9776 func_type = arm_current_func_type ();
9778 /* Volatile functions never return, so there is
9779 no need to save call saved registers. */
9780 call_saved_registers = 0;
9781 if (! IS_VOLATILE (func_type))
9783 unsigned int reg_mask;
9784 unsigned int reg;
9785 bool new_block;
9787 /* Make sure that we compute which registers will be saved
9788 on the stack using the same algorithm that is used by
9789 the prologue creation code. */
9790 reg_mask = arm_compute_save_reg_mask ();
9792 /* Now count the number of bits set in save_reg_mask.
9793 If we have already counted the registers in the stack
9794 frame, do not count them again. Non call-saved registers
9795 might be saved in the call-save area of the stack, if
9796 doing so will preserve the stack's alignment. Hence we
9797 must count them here. For each set bit we need 4 bytes
9798 of stack space. */
9799 if (frame_pointer_needed)
9800 reg_mask &= 0x07ff;
9801 call_saved_registers += 4 * bit_count (reg_mask);
9803 /* If the hard floating point registers are going to be
9804 used then they must be saved on the stack as well.
9805 Each register occupies 12 bytes of stack space. */
9806 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
9807 if (regs_ever_live[reg] && ! call_used_regs[reg])
9808 call_saved_registers += 12;
9810 /* Likewise VFP regs. */
9811 if (TARGET_HARD_FLOAT && TARGET_VFP)
9813 new_block = TRUE;
9814 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
9816 if ((regs_ever_live[reg] && !call_used_regs[reg])
9817 || (regs_ever_live[reg + 1] && !call_used_regs[reg + 1]))
9819 if (new_block)
9821 call_saved_registers += 4;
9822 new_block = FALSE;
9824 call_saved_registers += 8;
9826 else
9827 new_block = TRUE;
9831 if (TARGET_REALLY_IWMMXT)
9832 /* Check for the call-saved iWMMXt registers. */
9833 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
9834 if (regs_ever_live[reg] && ! call_used_regs [reg])
9835 call_saved_registers += 8;
9838 /* The stack frame contains 4 registers - the old frame pointer,
9839 the old stack pointer, the return address and PC of the start
9840 of the function. */
9841 stack_frame = frame_pointer_needed ? 16 : 0;
9843 /* OK, now we have enough information to compute the distances.
9844 There must be an entry in these switch tables for each pair
9845 of registers in ELIMINABLE_REGS, even if some of the entries
9846 seem to be redundant or useless. */
9847 switch (from)
9849 case ARG_POINTER_REGNUM:
9850 switch (to)
9852 case THUMB_HARD_FRAME_POINTER_REGNUM:
9853 return 0;
9855 case FRAME_POINTER_REGNUM:
9856 /* This is the reverse of the soft frame pointer
9857 to hard frame pointer elimination below. */
9858 if (call_saved_registers == 0 && stack_frame == 0)
9859 return 0;
9860 return (call_saved_registers + stack_frame - 4);
9862 case ARM_HARD_FRAME_POINTER_REGNUM:
9863 /* If there is no stack frame then the hard
9864 frame pointer and the arg pointer coincide. */
9865 if (stack_frame == 0 && call_saved_registers != 0)
9866 return 0;
9867 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
9868 return (frame_pointer_needed
9869 && current_function_needs_context
9870 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
9872 case STACK_POINTER_REGNUM:
9873 /* If nothing has been pushed on the stack at all
9874 then this will return -4. This *is* correct! */
9875 return call_saved_registers + stack_frame + local_vars + outgoing_args - 4;
9877 default:
9878 abort ();
9880 break;
9882 case FRAME_POINTER_REGNUM:
9883 switch (to)
9885 case THUMB_HARD_FRAME_POINTER_REGNUM:
9886 return 0;
9888 case ARM_HARD_FRAME_POINTER_REGNUM:
9889 /* The hard frame pointer points to the top entry in the
9890 stack frame. The soft frame pointer to the bottom entry
9891 in the stack frame. If there is no stack frame at all,
9892 then they are identical. */
9893 if (call_saved_registers == 0 && stack_frame == 0)
9894 return 0;
9895 return - (call_saved_registers + stack_frame - 4);
9897 case STACK_POINTER_REGNUM:
9898 return local_vars + outgoing_args;
9900 default:
9901 abort ();
9903 break;
9905 default:
9906 /* You cannot eliminate from the stack pointer.
9907 In theory you could eliminate from the hard frame
9908 pointer to the stack pointer, but this will never
9909 happen, since if a stack frame is not needed the
9910 hard frame pointer will never be used. */
9911 abort ();
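/* [Editor's worked example -- not part of arm.c.]  Assume a function
   that saves r4 and r5 (call_saved_registers = 8), needs a frame
   pointer (stack_frame = 16 for fp, ip, lr and pc), has 16 bytes of
   locals and no outgoing arguments.  The offsets computed above are:

     ARG_POINTER   -> FRAME_POINTER : 8 + 16 - 4          = 20
     ARG_POINTER   -> STACK_POINTER : 8 + 16 + 16 + 0 - 4 = 36
     FRAME_POINTER -> STACK_POINTER : 16 + 0              = 16
     FRAME_POINTER -> ARM_HARD_FP   : -(8 + 16 - 4)       = -20  */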
9915 /* Calculate the size of the stack frame, taking into account any
9916 padding that is required to ensure stack-alignment. */
9917 HOST_WIDE_INT
9918 arm_get_frame_size (void)
9920 int regno;
9922 int base_size = ROUND_UP_WORD (get_frame_size ());
9923 int entry_size = 0;
9924 unsigned long func_type = arm_current_func_type ();
9925 int leaf;
9926 bool new_block;
9928 if (! TARGET_ARM)
9929 abort ();
9931 if (! TARGET_ATPCS)
9932 return base_size;
9934 /* We need to know if we are a leaf function. Unfortunately, it
9935 is possible to be called after start_sequence has been called,
9936 which causes get_insns to return the insns for the sequence,
9937 not the function, which will cause leaf_function_p to return
9938 the incorrect result.
9940 To work around this, we cache the computed frame size. This
9941 works because we will only be calling RTL expanders that need
9942 to know about leaf functions once reload has completed, and the
9943 frame size cannot be changed after that time, so we can safely
9944 use the cached value. */
9946 if (reload_completed)
9947 return cfun->machine->frame_size;
9949 leaf = leaf_function_p ();
9951 /* A leaf function does not need any stack alignment if it has nothing
9952 on the stack. */
9953 if (leaf && base_size == 0)
9955 cfun->machine->frame_size = 0;
9956 return 0;
9959 /* We know that SP will be word aligned on entry, and we must
9960 preserve that condition at any subroutine call. But those are
9961 the only constraints. */
9963 /* Space for variadic functions. */
9964 if (current_function_pretend_args_size)
9965 entry_size += current_function_pretend_args_size;
9967 /* Space for saved registers. */
9968 entry_size += bit_count (arm_compute_save_reg_mask ()) * 4;
9970 if (! IS_VOLATILE (func_type))
9972 /* Space for saved FPA registers. */
9973 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
9974 if (regs_ever_live[regno] && ! call_used_regs[regno])
9975 entry_size += 12;
9977 /* Space for saved VFP registers. */
9978 if (TARGET_HARD_FLOAT && TARGET_VFP)
9980 new_block = TRUE;
9981 for (regno = FIRST_VFP_REGNUM; regno < LAST_VFP_REGNUM; regno += 2)
9983 if ((regs_ever_live[regno] && !call_used_regs[regno])
9984 || (regs_ever_live[regno + 1] && !call_used_regs[regno + 1]))
9986 if (new_block)
9988 entry_size += 4;
9989 new_block = FALSE;
9991 entry_size += 8;
9993 else
9994 new_block = TRUE;
9999 if (TARGET_REALLY_IWMMXT)
10001 /* Check for the call-saved iWMMXt registers. */
10002 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
10003 if (regs_ever_live [regno] && ! call_used_regs [regno])
10004 entry_size += 8;
10007 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10008 base_size += 4;
10009 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
10010 abort ();
10012 cfun->machine->frame_size = base_size;
10014 return base_size;
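/* [Editor's sketch -- not part of arm.c.]  The ATPCS rule applied above,
   as a standalone helper (the name is hypothetical): everything on the
   stack is already word-aligned, so when the combined entry, local and
   outgoing-argument sizes leave the stack misaligned with respect to 8
   bytes, a single 4-byte pad on the locals restores the alignment.  */
static int
sketch_atpcs_frame_pad (int entry_size, int base_size, int outgoing_args)
{
  if ((entry_size + base_size + outgoing_args) & 7)
    base_size += 4;

  return base_size;
}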
10017 /* Generate the prologue instructions for entry into an ARM function. */
10018 void
10019 arm_expand_prologue (void)
10021 int reg;
10022 rtx amount;
10023 rtx insn;
10024 rtx ip_rtx;
10025 unsigned long live_regs_mask;
10026 unsigned long func_type;
10027 int fp_offset = 0;
10028 int saved_pretend_args = 0;
10029 unsigned int args_to_push;
10031 func_type = arm_current_func_type ();
10033 /* Naked functions don't have prologues. */
10034 if (IS_NAKED (func_type))
10035 return;
10037 /* Make a copy of current_function_pretend_args_size as we may need to modify it locally. */
10038 args_to_push = current_function_pretend_args_size;
10040 /* Compute which registers we will have to save onto the stack. */
10041 live_regs_mask = arm_compute_save_reg_mask ();
10043 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
10045 if (frame_pointer_needed)
10047 if (IS_INTERRUPT (func_type))
10049 /* Interrupt functions must not corrupt any registers.
10050 Creating a frame pointer, however, corrupts the IP
10051 register, so we must push it first. */
10052 insn = emit_multi_reg_push (1 << IP_REGNUM);
10054 /* Do not set RTX_FRAME_RELATED_P on this insn.
10055 The dwarf stack unwinding code only wants to see one
10056 stack decrement per function, and this is not it. If
10057 this instruction is labeled as being part of the frame
10058 creation sequence then dwarf2out_frame_debug_expr will
10059 abort when it encounters the assignment of IP to FP
10060 later on, since the use of SP here establishes SP as
10061 the CFA register and not IP.
10063 Anyway this instruction is not really part of the stack
10064 frame creation although it is part of the prologue. */
10066 else if (IS_NESTED (func_type))
10068 /* The static chain register is the same as the IP register
10069 used as a scratch register during stack frame creation.
10070 To get around this, we need to find somewhere to store IP
10071 whilst the frame is being created. We try the following
10072 places in order:
10074 1. The last argument register.
10075 2. A slot on the stack above the frame. (This only
10076 works if the function is not a varargs function).
10077 3. Register r3, after pushing the argument registers
10078 onto the stack.
10080 Note - we only need to tell the dwarf2 backend about the SP
10081 adjustment in the second variant; the static chain register
10082 doesn't need to be unwound, as it doesn't contain a value
10083 inherited from the caller. */
10085 if (regs_ever_live[3] == 0)
10087 insn = gen_rtx_REG (SImode, 3);
10088 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10089 insn = emit_insn (insn);
10091 else if (args_to_push == 0)
10093 rtx dwarf;
10094 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
10095 insn = gen_rtx_MEM (SImode, insn);
10096 insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
10097 insn = emit_insn (insn);
10099 fp_offset = 4;
10101 /* Just tell the dwarf backend that we adjusted SP. */
10102 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10103 gen_rtx_PLUS (SImode, stack_pointer_rtx,
10104 GEN_INT (-fp_offset)));
10105 RTX_FRAME_RELATED_P (insn) = 1;
10106 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10107 dwarf, REG_NOTES (insn));
10109 else
10111 /* Store the args on the stack. */
10112 if (cfun->machine->uses_anonymous_args)
10113 insn = emit_multi_reg_push
10114 ((0xf0 >> (args_to_push / 4)) & 0xf);
10115 else
10116 insn = emit_insn
10117 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10118 GEN_INT (- args_to_push)));
10120 RTX_FRAME_RELATED_P (insn) = 1;
10122 saved_pretend_args = 1;
10123 fp_offset = args_to_push;
10124 args_to_push = 0;
10126 /* Now reuse r3 to preserve IP. */
10127 insn = gen_rtx_REG (SImode, 3);
10128 insn = gen_rtx_SET (SImode, insn, ip_rtx);
10129 (void) emit_insn (insn);
10133 if (fp_offset)
10135 insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
10136 insn = gen_rtx_SET (SImode, ip_rtx, insn);
10138 else
10139 insn = gen_movsi (ip_rtx, stack_pointer_rtx);
10141 insn = emit_insn (insn);
10142 RTX_FRAME_RELATED_P (insn) = 1;
10145 if (args_to_push)
10147 /* Push the argument registers, or reserve space for them. */
10148 if (cfun->machine->uses_anonymous_args)
10149 insn = emit_multi_reg_push
10150 ((0xf0 >> (args_to_push / 4)) & 0xf);
10151 else
10152 insn = emit_insn
10153 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10154 GEN_INT (- args_to_push)));
10155 RTX_FRAME_RELATED_P (insn) = 1;
10158 /* If this is an interrupt service routine, and the link register
10159 is going to be pushed, and we are not creating a stack frame
10160 (which would involve an extra push of IP and a pop in the epilogue),
10161 then subtracting four from LR now will mean that the function return
10162 can be done with a single instruction. */
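/* E.g. (sketch): after "sub lr, lr, #4" here, the epilogue can return
   with a single "ldmfd sp!, {..., pc}^" instead of having to adjust
   the return address on exit.  */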
10163 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
10164 && (live_regs_mask & (1 << LR_REGNUM)) != 0
10165 && ! frame_pointer_needed)
10166 emit_insn (gen_rtx_SET (SImode,
10167 gen_rtx_REG (SImode, LR_REGNUM),
10168 gen_rtx_PLUS (SImode,
10169 gen_rtx_REG (SImode, LR_REGNUM),
10170 GEN_INT (-4))));
10172 if (live_regs_mask)
10174 insn = emit_multi_reg_push (live_regs_mask);
10175 RTX_FRAME_RELATED_P (insn) = 1;
10178 if (TARGET_IWMMXT)
10179 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10180 if (regs_ever_live[reg] && ! call_used_regs [reg])
10182 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
10183 insn = gen_rtx_MEM (V2SImode, insn);
10184 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10185 gen_rtx_REG (V2SImode, reg)));
10186 RTX_FRAME_RELATED_P (insn) = 1;
10189 if (! IS_VOLATILE (func_type))
10191 int start_reg;
10193 /* Save any floating point call-saved registers used by this
10194 function. */
10195 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10197 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10198 if (regs_ever_live[reg] && !call_used_regs[reg])
10200 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
10201 insn = gen_rtx_MEM (XFmode, insn);
10202 insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
10203 gen_rtx_REG (XFmode, reg)));
10204 RTX_FRAME_RELATED_P (insn) = 1;
10207 else
10209 start_reg = LAST_FPA_REGNUM;
10211 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10213 if (regs_ever_live[reg] && !call_used_regs[reg])
10215 if (start_reg - reg == 3)
10217 insn = emit_sfm (reg, 4);
10218 RTX_FRAME_RELATED_P (insn) = 1;
10219 start_reg = reg - 1;
10222 else
10224 if (start_reg != reg)
10226 insn = emit_sfm (reg + 1, start_reg - reg);
10227 RTX_FRAME_RELATED_P (insn) = 1;
10229 start_reg = reg - 1;
10233 if (start_reg != reg)
10235 insn = emit_sfm (reg + 1, start_reg - reg);
10236 RTX_FRAME_RELATED_P (insn) = 1;
10239 if (TARGET_HARD_FLOAT && TARGET_VFP)
10241 start_reg = FIRST_VFP_REGNUM;
10243 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10245 if ((!regs_ever_live[reg] || call_used_regs[reg])
10246 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10248 if (start_reg != reg)
10250 insn = vfp_emit_fstmx (start_reg,
10251 (reg - start_reg) / 2);
10252 RTX_FRAME_RELATED_P (insn) = 1;
10254 start_reg = reg + 2;
10257 if (start_reg != reg)
10259 insn = vfp_emit_fstmx (start_reg,
10260 (reg - start_reg) / 2);
10261 RTX_FRAME_RELATED_P (insn) = 1;
10266 if (frame_pointer_needed)
10268 /* Create the new frame pointer. */
10269 insn = GEN_INT (-(4 + args_to_push + fp_offset));
10270 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
10271 RTX_FRAME_RELATED_P (insn) = 1;
10273 if (IS_NESTED (func_type))
10275 /* Recover the static chain register. */
10276 if (regs_ever_live [3] == 0
10277 || saved_pretend_args)
10278 insn = gen_rtx_REG (SImode, 3);
10279 else /* if (current_function_pretend_args_size == 0) */
10281 insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
10282 GEN_INT (4));
10283 insn = gen_rtx_MEM (SImode, insn);
10286 emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
10287 /* Add a USE to stop propagate_one_insn() from barfing. */
10288 emit_insn (gen_prologue_use (ip_rtx));
10292 amount = GEN_INT (-(arm_get_frame_size ()
10293 + current_function_outgoing_args_size));
10295 if (amount != const0_rtx)
10297 /* This add can produce multiple insns for a large constant, so we
10298 need to get tricky. */
10299 rtx last = get_last_insn ();
10300 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
10301 amount));
10304 last = last ? NEXT_INSN (last) : get_insns ();
10305 RTX_FRAME_RELATED_P (last) = 1;
10307 while (last != insn);
10309 /* If the frame pointer is needed, emit a special barrier that
10310 will prevent the scheduler from moving stores to the frame
10311 before the stack adjustment. */
10312 if (frame_pointer_needed)
10313 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
10314 hard_frame_pointer_rtx));
10317 /* If we are profiling, make sure no instructions are scheduled before
10318 the call to mcount. Similarly if the user has requested no
10319 scheduling in the prolog. */
10320 if (current_function_profile || TARGET_NO_SCHED_PRO)
10321 emit_insn (gen_blockage ());
10323 /* If the link register is being kept alive, with the return address in it,
10324 then make sure that it does not get reused by the ce2 pass. */
10325 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
10327 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
10328 cfun->machine->lr_save_eliminated = 1;
10332 /* If CODE is 'd', then the X is a condition operand and the instruction
10333 should only be executed if the condition is true.
10334 If CODE is 'D', then the X is a condition operand and the instruction
10335 should only be executed if the condition is false: however, if the mode
10336 of the comparison is CCFPEmode, then always execute the instruction -- we
10337 do this because in these circumstances !GE does not necessarily imply LT;
10338 in these cases the instruction pattern will take care to make sure that
10339 an instruction containing %d will follow, thereby undoing the effects of
10340 doing this instruction unconditionally.
10341 If CODE is 'N' then X is a floating point operand that must be negated
10342 before output.
10343 If CODE is 'B' then output a bitwise inverted value of X (a const int).
10344 If X is a REG and CODE is `M', output an ldm/stm style multi-reg. */
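/* For instance (illustrative only): an output template such as
   "add%?\t%0, %1, %2" uses the '?' code to emit the current condition,
   while "ldm%?ia\t%m1, %M0" uses 'm' and 'M' to print the base register
   and the register range of a multi-register transfer.  */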
10345 void
10346 arm_print_operand (FILE *stream, rtx x, int code)
10348 switch (code)
10350 case '@':
10351 fputs (ASM_COMMENT_START, stream);
10352 return;
10354 case '_':
10355 fputs (user_label_prefix, stream);
10356 return;
10358 case '|':
10359 fputs (REGISTER_PREFIX, stream);
10360 return;
10362 case '?':
10363 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
10365 if (TARGET_THUMB || current_insn_predicate != NULL)
10366 abort ();
10368 fputs (arm_condition_codes[arm_current_cc], stream);
10370 else if (current_insn_predicate)
10372 enum arm_cond_code code;
10374 if (TARGET_THUMB)
10375 abort ();
10377 code = get_arm_condition_code (current_insn_predicate);
10378 fputs (arm_condition_codes[code], stream);
10380 return;
10382 case 'N':
10384 REAL_VALUE_TYPE r;
10385 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10386 r = REAL_VALUE_NEGATE (r);
10387 fprintf (stream, "%s", fp_const_from_val (&r));
10389 return;
10391 case 'B':
10392 if (GET_CODE (x) == CONST_INT)
10394 HOST_WIDE_INT val;
10395 val = ARM_SIGN_EXTEND (~INTVAL (x));
10396 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
10398 else
10400 putc ('~', stream);
10401 output_addr_const (stream, x);
10403 return;
10405 case 'i':
10406 fprintf (stream, "%s", arithmetic_instr (x, 1));
10407 return;
10409 /* Truncate Cirrus shift counts. */
10410 case 's':
10411 if (GET_CODE (x) == CONST_INT)
10413 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
10414 return;
10416 arm_print_operand (stream, x, 0);
10417 return;
10419 case 'I':
10420 fprintf (stream, "%s", arithmetic_instr (x, 0));
10421 return;
10423 case 'S':
10425 HOST_WIDE_INT val;
10426 const char * shift = shift_op (x, &val);
10428 if (shift)
10430 fprintf (stream, ", %s ", shift_op (x, &val));
10431 if (val == -1)
10432 arm_print_operand (stream, XEXP (x, 1), 0);
10433 else
10434 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
10437 return;
10439 /* An explanation of the 'Q', 'R' and 'H' register operands:
10441 In a pair of registers containing a DI or DF value the 'Q'
10442 operand returns the register number of the register containing
10443 the least significant part of the value. The 'R' operand returns
10444 the register number of the register containing the most
10445 significant part of the value.
10447 The 'H' operand returns the higher of the two register numbers.
10448 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
10449 same as the 'Q' operand, since the most significant part of the
10450 value is held in the lower number register. The reverse is true
10451 on systems where WORDS_BIG_ENDIAN is false.
10453 The purpose of these operands is to distinguish between cases
10454 where the endian-ness of the values is important (for example
10455 when they are added together), and cases where the endian-ness
10456 is irrelevant, but the order of register operations is important.
10457 For example when loading a value from memory into a register
10458 pair, the endian-ness does not matter. Provided that the value
10459 from the lower memory address is put into the lower numbered
10460 register, and the value from the higher address is put into the
10461 higher numbered register, the load will work regardless of whether
10462 the value being loaded is big-wordian or little-wordian. The
10463 order of the two register loads can matter, however, if the address
10464 of the memory location is actually held in one of the registers
10465 being overwritten by the load. */
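/* Example (little-endian, illustrative): for a DImode value held in
   {r0, r1}, %Q prints r0 (least significant word), %R prints r1 (most
   significant word), and %H prints r1, the higher-numbered register,
   regardless of endianness.  */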
10466 case 'Q':
10467 if (REGNO (x) > LAST_ARM_REGNUM)
10468 abort ();
10469 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
10470 return;
10472 case 'R':
10473 if (REGNO (x) > LAST_ARM_REGNUM)
10474 abort ();
10475 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
10476 return;
10478 case 'H':
10479 if (REGNO (x) > LAST_ARM_REGNUM)
10480 abort ();
10481 asm_fprintf (stream, "%r", REGNO (x) + 1);
10482 return;
10484 case 'm':
10485 asm_fprintf (stream, "%r",
10486 GET_CODE (XEXP (x, 0)) == REG
10487 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
10488 return;
10490 case 'M':
10491 asm_fprintf (stream, "{%r-%r}",
10492 REGNO (x),
10493 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
10494 return;
10496 case 'd':
10497 /* CONST_TRUE_RTX means always -- that's the default. */
10498 if (x == const_true_rtx)
10499 return;
10501 fputs (arm_condition_codes[get_arm_condition_code (x)],
10502 stream);
10503 return;
10505 case 'D':
10506 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
10507 want to do that. */
10508 if (x == const_true_rtx)
10509 abort ();
10511 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
10512 (get_arm_condition_code (x))],
10513 stream);
10514 return;
10516 /* Cirrus registers can be accessed in a variety of ways:
10517 single floating point (f)
10518 double floating point (d)
10519 32bit integer (fx)
10520 64bit integer (dx). */
10521 case 'W': /* Cirrus register in F mode. */
10522 case 'X': /* Cirrus register in D mode. */
10523 case 'Y': /* Cirrus register in FX mode. */
10524 case 'Z': /* Cirrus register in DX mode. */
10525 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10526 abort ();
10528 fprintf (stream, "mv%s%s",
10529 code == 'W' ? "f"
10530 : code == 'X' ? "d"
10531 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
10533 return;
10535 /* Print cirrus register in the mode specified by the register's mode. */
10536 case 'V':
10538 int mode = GET_MODE (x);
10540 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
10541 abort ();
10543 fprintf (stream, "mv%s%s",
10544 mode == DFmode ? "d"
10545 : mode == SImode ? "fx"
10546 : mode == DImode ? "dx"
10547 : "f", reg_names[REGNO (x)] + 2);
10549 return;
10552 case 'U':
10553 if (GET_CODE (x) != REG
10554 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
10555 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
10556 /* Bad value for wCG register number. */
10557 abort ();
10558 else
10559 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
10560 return;
10562 /* Print an iWMMXt control register name. */
10563 case 'w':
10564 if (GET_CODE (x) != CONST_INT
10565 || INTVAL (x) < 0
10566 || INTVAL (x) >= 16)
10567 /* Bad value for wC register number. */
10568 abort ();
10569 else
10571 static const char * wc_reg_names [16] =
10573 "wCID", "wCon", "wCSSF", "wCASF",
10574 "wC4", "wC5", "wC6", "wC7",
10575 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
10576 "wC12", "wC13", "wC14", "wC15"
10579 fprintf (stream, wc_reg_names [INTVAL (x)]);
10581 return;
10583 /* Print a VFP double precision register name. */
10584 case 'P':
10586 int mode = GET_MODE (x);
10587 int num;
10589 if (mode != DImode && mode != DFmode)
10590 abort ();
10592 if (GET_CODE (x) != REG
10593 || !IS_VFP_REGNUM (REGNO (x)))
10594 abort ();
10596 num = REGNO(x) - FIRST_VFP_REGNUM;
10597 if (num & 1)
10598 abort ();
10600 fprintf (stream, "d%d", num >> 1);
10602 return;
10604 default:
10605 if (x == 0)
10606 abort ();
10608 if (GET_CODE (x) == REG)
10609 asm_fprintf (stream, "%r", REGNO (x));
10610 else if (GET_CODE (x) == MEM)
10612 output_memory_reference_mode = GET_MODE (x);
10613 output_address (XEXP (x, 0));
10615 else if (GET_CODE (x) == CONST_DOUBLE)
10616 fprintf (stream, "#%s", fp_immediate_constant (x));
10617 else if (GET_CODE (x) == NEG)
10618 abort (); /* This should never happen now. */
10619 else
10621 fputc ('#', stream);
10622 output_addr_const (stream, x);
10627 #ifndef AOF_ASSEMBLER
10628 /* Target hook for assembling integer objects. The ARM version needs to
10629 handle word-sized values specially. */
10630 static bool
10631 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
10633 if (size == UNITS_PER_WORD && aligned_p)
10635 fputs ("\t.word\t", asm_out_file);
10636 output_addr_const (asm_out_file, x);
10638 /* Mark symbols as position independent. We only do this in the
10639 .text segment, not in the .data segment. */
10640 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
10641 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
10643 if (GET_CODE (x) == SYMBOL_REF
10644 && (CONSTANT_POOL_ADDRESS_P (x)
10645 || SYMBOL_REF_LOCAL_P (x)))
10646 fputs ("(GOTOFF)", asm_out_file);
10647 else if (GET_CODE (x) == LABEL_REF)
10648 fputs ("(GOTOFF)", asm_out_file);
10649 else
10650 fputs ("(GOT)", asm_out_file);
10652 fputc ('\n', asm_out_file);
10653 return true;
10656 if (VECTOR_MODE_SUPPORTED_P (GET_MODE (x)))
10658 int i, units;
10660 if (GET_CODE (x) != CONST_VECTOR)
10661 abort ();
10663 units = CONST_VECTOR_NUNITS (x);
10665 switch (GET_MODE (x))
10667 case V2SImode: size = 4; break;
10668 case V4HImode: size = 2; break;
10669 case V8QImode: size = 1; break;
10670 default:
10671 abort ();
10674 for (i = 0; i < units; i++)
10676 rtx elt;
10678 elt = CONST_VECTOR_ELT (x, i);
10679 assemble_integer
10680 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
10683 return true;
10686 return default_assemble_integer (x, size, aligned_p);
10688 #endif
10690 /* A finite state machine takes care of noticing whether or not instructions
10691 can be conditionally executed, thus decreasing execution time and code
10692 size by deleting branch instructions. The fsm is controlled by
10693 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
10695 /* The states of the fsm controlling condition codes are:
10696 0: normal, do nothing special
10697 1: make ASM_OUTPUT_OPCODE not output this instruction
10698 2: make ASM_OUTPUT_OPCODE not output this instruction
10699 3: make instructions conditional
10700 4: make instructions conditional
10702 State transitions (state->state by whom under condition):
10703 0 -> 1 final_prescan_insn if the `target' is a label
10704 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
10705 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
10706 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
10707 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
10708 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
10709 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
10710 (the target insn is arm_target_insn).
10712 If the jump clobbers the conditions then we use states 2 and 4.
10714 A similar thing can be done with conditional return insns.
10716 XXX In case the `target' is an unconditional branch, this conditionalising
10717 of the instructions always reduces code size, but not always execution
10718 time. But then, I want to reduce the code size to somewhere near what
10719 /bin/cc produces. */
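/* An illustrative example of the transformation (not taken from real
   compiler output):

        cmp   r0, #0                        cmp   r0, #0
        beq   .L1               ==>         addne r1, r1, #1
        add   r1, r1, #1
      .L1:

   The branch is deleted and the skipped instruction is made
   conditional on the inverse of the branch condition.  */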
10721 /* Returns the index of the ARM condition code string in
10722 `arm_condition_codes'. COMPARISON should be an rtx like
10723 `(eq (...) (...))'. */
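/* E.g. (illustrative): (eq (reg:CC CC_REGNUM) (const_int 0)) in plain
   CCmode yields ARM_EQ; the CC_D* modes below encode dominated
   comparisons, for which only EQ and NE comparisons are meaningful.  */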
10724 static enum arm_cond_code
10725 get_arm_condition_code (rtx comparison)
10727 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
10728 int code;
10729 enum rtx_code comp_code = GET_CODE (comparison);
10731 if (GET_MODE_CLASS (mode) != MODE_CC)
10732 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
10733 XEXP (comparison, 1));
10735 switch (mode)
10737 case CC_DNEmode: code = ARM_NE; goto dominance;
10738 case CC_DEQmode: code = ARM_EQ; goto dominance;
10739 case CC_DGEmode: code = ARM_GE; goto dominance;
10740 case CC_DGTmode: code = ARM_GT; goto dominance;
10741 case CC_DLEmode: code = ARM_LE; goto dominance;
10742 case CC_DLTmode: code = ARM_LT; goto dominance;
10743 case CC_DGEUmode: code = ARM_CS; goto dominance;
10744 case CC_DGTUmode: code = ARM_HI; goto dominance;
10745 case CC_DLEUmode: code = ARM_LS; goto dominance;
10746 case CC_DLTUmode: code = ARM_CC;
10748 dominance:
10749 if (comp_code != EQ && comp_code != NE)
10750 abort ();
10752 if (comp_code == EQ)
10753 return ARM_INVERSE_CONDITION_CODE (code);
10754 return code;
10756 case CC_NOOVmode:
10757 switch (comp_code)
10759 case NE: return ARM_NE;
10760 case EQ: return ARM_EQ;
10761 case GE: return ARM_PL;
10762 case LT: return ARM_MI;
10763 default: abort ();
10766 case CC_Zmode:
10767 switch (comp_code)
10769 case NE: return ARM_NE;
10770 case EQ: return ARM_EQ;
10771 default: abort ();
10774 case CC_Nmode:
10775 switch (comp_code)
10777 case NE: return ARM_MI;
10778 case EQ: return ARM_PL;
10779 default: abort ();
10782 case CCFPEmode:
10783 case CCFPmode:
10784 /* These encodings assume that AC=1 in the FPA system control
10785 byte. This allows us to handle all cases except UNEQ and
10786 LTGT. */
10787 switch (comp_code)
10789 case GE: return ARM_GE;
10790 case GT: return ARM_GT;
10791 case LE: return ARM_LS;
10792 case LT: return ARM_MI;
10793 case NE: return ARM_NE;
10794 case EQ: return ARM_EQ;
10795 case ORDERED: return ARM_VC;
10796 case UNORDERED: return ARM_VS;
10797 case UNLT: return ARM_LT;
10798 case UNLE: return ARM_LE;
10799 case UNGT: return ARM_HI;
10800 case UNGE: return ARM_PL;
10801 /* UNEQ and LTGT do not have a representation. */
10802 case UNEQ: /* Fall through. */
10803 case LTGT: /* Fall through. */
10804 default: abort ();
10807 case CC_SWPmode:
10808 switch (comp_code)
10810 case NE: return ARM_NE;
10811 case EQ: return ARM_EQ;
10812 case GE: return ARM_LE;
10813 case GT: return ARM_LT;
10814 case LE: return ARM_GE;
10815 case LT: return ARM_GT;
10816 case GEU: return ARM_LS;
10817 case GTU: return ARM_CC;
10818 case LEU: return ARM_CS;
10819 case LTU: return ARM_HI;
10820 default: abort ();
10823 case CC_Cmode:
10824 switch (comp_code)
10826 case LTU: return ARM_CS;
10827 case GEU: return ARM_CC;
10828 default: abort ();
10831 case CCmode:
10832 switch (comp_code)
10834 case NE: return ARM_NE;
10835 case EQ: return ARM_EQ;
10836 case GE: return ARM_GE;
10837 case GT: return ARM_GT;
10838 case LE: return ARM_LE;
10839 case LT: return ARM_LT;
10840 case GEU: return ARM_CS;
10841 case GTU: return ARM_HI;
10842 case LEU: return ARM_LS;
10843 case LTU: return ARM_CC;
10844 default: abort ();
10847 default: abort ();
10850 abort ();
10853 void
10854 arm_final_prescan_insn (rtx insn)
10856 /* BODY will hold the body of INSN. */
10857 rtx body = PATTERN (insn);
10859 /* This will be 1 if trying to repeat the trick, and things need to be
10860 reversed if it appears to fail. */
10861 int reverse = 0;
10863 /* JUMP_CLOBBERS will be one if the condition codes are clobbered when
10864 a branch is taken, even if the rtl suggests otherwise. It also
10865 means that we have to grub around within the jump expression to find
10866 out what the conditions are when the jump isn't taken. */
10867 int jump_clobbers = 0;
10869 /* If we start with a return insn, we only succeed if we find another one. */
10870 int seeking_return = 0;
10872 /* START_INSN will hold the insn from where we start looking. This is the
10873 first insn after the following code_label if REVERSE is true. */
10874 rtx start_insn = insn;
10876 /* If in state 4, check if the target branch is reached, in order to
10877 change back to state 0. */
10878 if (arm_ccfsm_state == 4)
10880 if (insn == arm_target_insn)
10882 arm_target_insn = NULL;
10883 arm_ccfsm_state = 0;
10885 return;
10888 /* If in state 3, it is possible to repeat the trick, if this insn is an
10889 unconditional branch to a label, and immediately following this branch
10890 is the previous target label which is only used once, and the label this
10891 branch jumps to is not too far off. */
10892 if (arm_ccfsm_state == 3)
10894 if (simplejump_p (insn))
10896 start_insn = next_nonnote_insn (start_insn);
10897 if (GET_CODE (start_insn) == BARRIER)
10899 /* XXX Isn't this always a barrier? */
10900 start_insn = next_nonnote_insn (start_insn);
10902 if (GET_CODE (start_insn) == CODE_LABEL
10903 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10904 && LABEL_NUSES (start_insn) == 1)
10905 reverse = TRUE;
10906 else
10907 return;
10909 else if (GET_CODE (body) == RETURN)
10911 start_insn = next_nonnote_insn (start_insn);
10912 if (GET_CODE (start_insn) == BARRIER)
10913 start_insn = next_nonnote_insn (start_insn);
10914 if (GET_CODE (start_insn) == CODE_LABEL
10915 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
10916 && LABEL_NUSES (start_insn) == 1)
10918 reverse = TRUE;
10919 seeking_return = 1;
10921 else
10922 return;
10924 else
10925 return;
10928 if (arm_ccfsm_state != 0 && !reverse)
10929 abort ();
10930 if (GET_CODE (insn) != JUMP_INSN)
10931 return;
10933 /* This jump might be paralleled with a clobber of the condition codes;
10934 the jump should always come first. */
10935 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
10936 body = XVECEXP (body, 0, 0);
10938 if (reverse
10939 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
10940 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
10942 int insns_skipped;
10943 int fail = FALSE, succeed = FALSE;
10944 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
10945 int then_not_else = TRUE;
10946 rtx this_insn = start_insn, label = 0;
10948 /* If the jump cannot be done with one instruction, we cannot
10949 conditionally execute the instruction in the inverse case. */
10950 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
10952 jump_clobbers = 1;
10953 return;
10956 /* Register the insn jumped to. */
10957 if (reverse)
10959 if (!seeking_return)
10960 label = XEXP (SET_SRC (body), 0);
10962 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
10963 label = XEXP (XEXP (SET_SRC (body), 1), 0);
10964 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
10966 label = XEXP (XEXP (SET_SRC (body), 2), 0);
10967 then_not_else = FALSE;
10969 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
10970 seeking_return = 1;
10971 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
10973 seeking_return = 1;
10974 then_not_else = FALSE;
10976 else
10977 abort ();
10979 /* See how many insns this branch skips, and what kind of insns. If all
10980 insns are okay, and the label or unconditional branch to the same
10981 label is not too far away, succeed. */
10982 for (insns_skipped = 0;
10983 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
10985 rtx scanbody;
10987 this_insn = next_nonnote_insn (this_insn);
10988 if (!this_insn)
10989 break;
10991 switch (GET_CODE (this_insn))
10993 case CODE_LABEL:
10994 /* Succeed if it is the target label, otherwise fail since
10995 control falls in from somewhere else. */
10996 if (this_insn == label)
10998 if (jump_clobbers)
11000 arm_ccfsm_state = 2;
11001 this_insn = next_nonnote_insn (this_insn);
11003 else
11004 arm_ccfsm_state = 1;
11005 succeed = TRUE;
11007 else
11008 fail = TRUE;
11009 break;
11011 case BARRIER:
11012 /* Succeed if the following insn is the target label.
11013 Otherwise fail.
11014 If return insns are used then the last insn in a function
11015 will be a barrier. */
11016 this_insn = next_nonnote_insn (this_insn);
11017 if (this_insn && this_insn == label)
11019 if (jump_clobbers)
11021 arm_ccfsm_state = 2;
11022 this_insn = next_nonnote_insn (this_insn);
11024 else
11025 arm_ccfsm_state = 1;
11026 succeed = TRUE;
11028 else
11029 fail = TRUE;
11030 break;
11032 case CALL_INSN:
11033 /* If using 32-bit addresses the cc is not preserved over
11034 calls. */
11035 if (TARGET_APCS_32)
11037 /* Succeed if the following insn is the target label,
11038 or if the following two insns are a barrier and
11039 the target label. */
11040 this_insn = next_nonnote_insn (this_insn);
11041 if (this_insn && GET_CODE (this_insn) == BARRIER)
11042 this_insn = next_nonnote_insn (this_insn);
11044 if (this_insn && this_insn == label
11045 && insns_skipped < max_insns_skipped)
11047 if (jump_clobbers)
11049 arm_ccfsm_state = 2;
11050 this_insn = next_nonnote_insn (this_insn);
11052 else
11053 arm_ccfsm_state = 1;
11054 succeed = TRUE;
11056 else
11057 fail = TRUE;
11059 break;
11061 case JUMP_INSN:
11062 /* If this is an unconditional branch to the same label, succeed.
11063 If it is to another label, do nothing. If it is conditional,
11064 fail. */
11065 /* XXX Probably, the tests for SET and the PC are
11066 unnecessary. */
11068 scanbody = PATTERN (this_insn);
11069 if (GET_CODE (scanbody) == SET
11070 && GET_CODE (SET_DEST (scanbody)) == PC)
11072 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
11073 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
11075 arm_ccfsm_state = 2;
11076 succeed = TRUE;
11078 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
11079 fail = TRUE;
11081 /* Fail if a conditional return is undesirable (e.g. on a
11082 StrongARM), but still allow this if optimizing for size. */
11083 else if (GET_CODE (scanbody) == RETURN
11084 && !use_return_insn (TRUE, NULL)
11085 && !optimize_size)
11086 fail = TRUE;
11087 else if (GET_CODE (scanbody) == RETURN
11088 && seeking_return)
11090 arm_ccfsm_state = 2;
11091 succeed = TRUE;
11093 else if (GET_CODE (scanbody) == PARALLEL)
11095 switch (get_attr_conds (this_insn))
11097 case CONDS_NOCOND:
11098 break;
11099 default:
11100 fail = TRUE;
11101 break;
11104 else
11105 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
11107 break;
11109 case INSN:
11110 /* Instructions using or affecting the condition codes make it
11111 fail. */
11112 scanbody = PATTERN (this_insn);
11113 if (!(GET_CODE (scanbody) == SET
11114 || GET_CODE (scanbody) == PARALLEL)
11115 || get_attr_conds (this_insn) != CONDS_NOCOND)
11116 fail = TRUE;
11118 /* A conditional Cirrus instruction must be followed by
11119 a non-Cirrus instruction. However, since we
11120 conditionalize instructions in this function, and
11121 since by the time we get here we can't add
11122 instructions (nops) because shorten_branches() has
11123 already been called, we disable conditionalizing
11124 Cirrus instructions to be safe. */
11125 if (GET_CODE (scanbody) != USE
11126 && GET_CODE (scanbody) != CLOBBER
11127 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
11128 fail = TRUE;
11129 break;
11131 default:
11132 break;
11135 if (succeed)
11137 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
11138 arm_target_label = CODE_LABEL_NUMBER (label);
11139 else if (seeking_return || arm_ccfsm_state == 2)
11141 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
11143 this_insn = next_nonnote_insn (this_insn);
11144 if (this_insn && (GET_CODE (this_insn) == BARRIER
11145 || GET_CODE (this_insn) == CODE_LABEL))
11146 abort ();
11148 if (!this_insn)
11150 /* Oh dear!  We ran off the end... give up. */
11151 recog (PATTERN (insn), insn, NULL);
11152 arm_ccfsm_state = 0;
11153 arm_target_insn = NULL;
11154 return;
11156 arm_target_insn = this_insn;
11158 else
11159 abort ();
11160 if (jump_clobbers)
11162 if (reverse)
11163 abort ();
11164 arm_current_cc =
11165 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
11166 0), 0), 1));
11167 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
11168 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11169 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
11170 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11172 else
11174 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11175 what it was. */
11176 if (!reverse)
11177 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
11178 0));
11181 if (reverse || then_not_else)
11182 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
11185 /* Restore recog_data (getting the attributes of other insns can
11186 destroy this array, but final.c assumes that it remains intact
11187 across this call; since the insn has been recognized already we
11188 call recog directly). */
11189 recog (PATTERN (insn), insn, NULL);
11193 /* Returns true if REGNO is a valid register
11194 for holding a quantity of type MODE. */
11195 int
11196 arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11198 if (GET_MODE_CLASS (mode) == MODE_CC)
11199 return regno == CC_REGNUM || regno == VFPCC_REGNUM;
11201 if (TARGET_THUMB)
11202 /* For the Thumb we only allow values bigger than SImode in
11203 registers 0 - 6, so that there is always a second low
11204 register available to hold the upper part of the value.
11205 We probably ought to ensure that the register is the
11206 start of an even-numbered register pair. */
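/* E.g. (illustrative): a DImode value may start in r6 and occupy
   {r6, r7}, but may not start in r7, since its upper word would
   need the high register r8.  */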
11207 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
11209 if (IS_CIRRUS_REGNUM (regno))
11210 /* We have outlawed SI values in Cirrus registers because they
11211 reside in the lower 32 bits, but SF values reside in the
11212 upper 32 bits. This causes gcc all sorts of grief. We can't
11213 even split the registers into pairs because Cirrus SI values
11214 get sign extended to 64 bits -- aldyh. */
11215 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
11217 if (IS_VFP_REGNUM (regno))
11219 if (mode == SFmode || mode == SImode)
11220 return TRUE;
11222 /* DFmode values are only valid in even register pairs. */
11223 if (mode == DFmode)
11224 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
11225 return FALSE;
11228 if (IS_IWMMXT_GR_REGNUM (regno))
11229 return mode == SImode;
11231 if (IS_IWMMXT_REGNUM (regno))
11232 return VALID_IWMMXT_REG_MODE (mode);
11234 if (regno <= LAST_ARM_REGNUM)
11235 /* We allow any value to be stored in the general registers. */
11236 return 1;
11238 if ( regno == FRAME_POINTER_REGNUM
11239 || regno == ARG_POINTER_REGNUM)
11240 /* We only allow integers in the fake hard registers. */
11241 return GET_MODE_CLASS (mode) == MODE_INT;
11243 /* The only registers left are the FPA registers
11244 which we only allow to hold FP values. */
11245 return GET_MODE_CLASS (mode) == MODE_FLOAT
11246 && regno >= FIRST_FPA_REGNUM
11247 && regno <= LAST_FPA_REGNUM;
11250 enum reg_class
11251 arm_regno_class (int regno)
11253 if (TARGET_THUMB)
11255 if (regno == STACK_POINTER_REGNUM)
11256 return STACK_REG;
11257 if (regno == CC_REGNUM)
11258 return CC_REG;
11259 if (regno < 8)
11260 return LO_REGS;
11261 return HI_REGS;
11264 if ( regno <= LAST_ARM_REGNUM
11265 || regno == FRAME_POINTER_REGNUM
11266 || regno == ARG_POINTER_REGNUM)
11267 return GENERAL_REGS;
11269 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
11270 return NO_REGS;
11272 if (IS_CIRRUS_REGNUM (regno))
11273 return CIRRUS_REGS;
11275 if (IS_VFP_REGNUM (regno))
11276 return VFP_REGS;
11278 if (IS_IWMMXT_REGNUM (regno))
11279 return IWMMXT_REGS;
11281 if (IS_IWMMXT_GR_REGNUM (regno))
11282 return IWMMXT_GR_REGS;
11284 return FPA_REGS;
11287 /* Handle a special case when computing the offset
11288 of an argument from the frame pointer. */
11289 int
11290 arm_debugger_arg_offset (int value, rtx addr)
11292 rtx insn;
11294 /* We are only interested if dbxout_parms() failed to compute the offset. */
11295 if (value != 0)
11296 return 0;
11298 /* We can only cope with the case where the address is held in a register. */
11299 if (GET_CODE (addr) != REG)
11300 return 0;
11302 /* If we are using the frame pointer to point at the argument, then
11303 an offset of 0 is correct. */
11304 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
11305 return 0;
11307 /* If we are using the stack pointer to point at the
11308 argument, then an offset of 0 is correct. */
11309 if ((TARGET_THUMB || !frame_pointer_needed)
11310 && REGNO (addr) == SP_REGNUM)
11311 return 0;
11313 /* Oh dear. The argument is pointed to by a register rather
11314 than being held in a register, or being stored at a known
11315 offset from the frame pointer. Since GDB only understands
11316 those two kinds of argument we must translate the address
11317 held in the register into an offset from the frame pointer.
11318 We do this by searching through the insns for the function
11319 looking to see where this register gets its value. If the
11320 register is initialized from the frame pointer plus an offset
11321 then we are in luck and we can continue, otherwise we give up.
11323 This code is exercised by producing debugging information
11324 for a function with arguments like this:
11326 double func (double a, double b, int c, double d) {return d;}
11328 Without this code the stab for parameter 'd' will be set to
11329 an offset of 0 from the frame pointer, rather than 8. */
11331 /* The if() statement says:
11333 If the insn is a normal instruction
11334 and if the insn is setting the value in a register
11335 and if the register being set is the register holding the address of the argument
11336 and if the address is computed by an addition
11337 that involves adding to a register
11338 which is the frame pointer
11339 a constant integer
11341 then... */
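/* I.e. we are looking for an insn of the shape (illustrative RTL):
   (set (reg Rn)
        (plus (reg hard-frame-pointer) (const_int OFFSET)))  */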
11343 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11345 if ( GET_CODE (insn) == INSN
11346 && GET_CODE (PATTERN (insn)) == SET
11347 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
11348 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
11349 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
11350 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11351 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
11354 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
11356 break;
11360 if (value == 0)
11362 debug_rtx (addr);
11363 warning ("unable to compute real location of stacked parameter");
11364 value = 8; /* XXX magic hack */
11367 return value;
11370 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11371 do \
11373 if ((MASK) & insn_flags) \
11374 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \
11376 while (0)
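/* E.g. (illustrative): def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero",
   di_ftype_void, ARM_BUILTIN_WZERO) registers the builtin only when the
   target's insn_flags include FL_IWMMXT.  */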
11378 struct builtin_description
11380 const unsigned int mask;
11381 const enum insn_code icode;
11382 const char * const name;
11383 const enum arm_builtins code;
11384 const enum rtx_code comparison;
11385 const unsigned int flag;
11388 static const struct builtin_description bdesc_2arg[] =
11390 #define IWMMXT_BUILTIN(code, string, builtin) \
11391 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11392 ARM_BUILTIN_##builtin, 0, 0 },
11394 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
11395 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
11396 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
11397 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
11398 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
11399 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
11400 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
11401 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
11402 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
11403 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
11404 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
11405 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
11406 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
11407 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
11408 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
11409 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
11410 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
11411 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
11412 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
11413 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
11414 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
11415 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
11416 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
11417 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
11418 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
11419 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
11420 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
11421 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
11422 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
11423 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
11424 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
11425 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
11426 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
11427 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
11428 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
11429 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
11430 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
11431 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
11432 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
11433 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
11434 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
11435 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
11436 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
11437 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
11438 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
11439 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
11440 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
11441 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
11442 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
11443 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
11444 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
11445 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
11446 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
11447 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
11448 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
11449 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
11450 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
11451 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
11453 #define IWMMXT_BUILTIN2(code, builtin) \
11454 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
11456 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
11457 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
11458 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
11459 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
11460 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
11461 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
11462 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
11463 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
11464 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
11465 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
11466 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
11467 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
11468 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
11469 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
11470 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
11471 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
11472 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
11473 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
11474 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
11475 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
11476 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
11477 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
11478 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
11479 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
11480 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
11481 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
11482 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
11483 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
11484 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
11485 IWMMXT_BUILTIN2 (rordi3, WRORDI)
11486 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
11487 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
11490 static const struct builtin_description bdesc_1arg[] =
11492 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
11493 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
11494 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
11495 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
11496 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
11497 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
11498 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
11499 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
11500 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
11501 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
11502 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
11503 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
11504 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
11505 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
11506 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
11507 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
11508 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
11509 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
11512 /* Set up all the iWMMXt builtins. This is
11513 not called if TARGET_IWMMXT is zero. */
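/* For example (assumed user-level usage, not part of this file): once
   these builtins are registered, code compiled with iWMMXt enabled can
   call __builtin_arm_waddb (a, b) on two V8QI values; the call expands
   through bdesc_2arg above to the addv8qi3 pattern.  */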
11515 static void
11516 arm_init_iwmmxt_builtins (void)
11518 const struct builtin_description * d;
11519 size_t i;
11520 tree endlink = void_list_node;
11522 tree int_ftype_int
11523 = build_function_type (integer_type_node,
11524 tree_cons (NULL_TREE, integer_type_node, endlink));
11525 tree v8qi_ftype_v8qi_v8qi_int
11526 = build_function_type (V8QI_type_node,
11527 tree_cons (NULL_TREE, V8QI_type_node,
11528 tree_cons (NULL_TREE, V8QI_type_node,
11529 tree_cons (NULL_TREE,
11530 integer_type_node,
11531 endlink))));
11532 tree v4hi_ftype_v4hi_int
11533 = build_function_type (V4HI_type_node,
11534 tree_cons (NULL_TREE, V4HI_type_node,
11535 tree_cons (NULL_TREE, integer_type_node,
11536 endlink)));
11537 tree v2si_ftype_v2si_int
11538 = build_function_type (V2SI_type_node,
11539 tree_cons (NULL_TREE, V2SI_type_node,
11540 tree_cons (NULL_TREE, integer_type_node,
11541 endlink)));
11542 tree v2si_ftype_di_di
11543 = build_function_type (V2SI_type_node,
11544 tree_cons (NULL_TREE, long_long_integer_type_node,
11545 tree_cons (NULL_TREE, long_long_integer_type_node,
11546 endlink)));
11547 tree di_ftype_di_int
11548 = build_function_type (long_long_integer_type_node,
11549 tree_cons (NULL_TREE, long_long_integer_type_node,
11550 tree_cons (NULL_TREE, integer_type_node,
11551 endlink)));
11552 tree di_ftype_di_int_int
11553 = build_function_type (long_long_integer_type_node,
11554 tree_cons (NULL_TREE, long_long_integer_type_node,
11555 tree_cons (NULL_TREE, integer_type_node,
11556 tree_cons (NULL_TREE,
11557 integer_type_node,
11558 endlink))));
11559 tree int_ftype_v8qi
11560 = build_function_type (integer_type_node,
11561 tree_cons (NULL_TREE, V8QI_type_node,
11562 endlink));
11563 tree int_ftype_v4hi
11564 = build_function_type (integer_type_node,
11565 tree_cons (NULL_TREE, V4HI_type_node,
11566 endlink));
11567 tree int_ftype_v2si
11568 = build_function_type (integer_type_node,
11569 tree_cons (NULL_TREE, V2SI_type_node,
11570 endlink));
11571 tree int_ftype_v8qi_int
11572 = build_function_type (integer_type_node,
11573 tree_cons (NULL_TREE, V8QI_type_node,
11574 tree_cons (NULL_TREE, integer_type_node,
11575 endlink)));
11576 tree int_ftype_v4hi_int
11577 = build_function_type (integer_type_node,
11578 tree_cons (NULL_TREE, V4HI_type_node,
11579 tree_cons (NULL_TREE, integer_type_node,
11580 endlink)));
11581 tree int_ftype_v2si_int
11582 = build_function_type (integer_type_node,
11583 tree_cons (NULL_TREE, V2SI_type_node,
11584 tree_cons (NULL_TREE, integer_type_node,
11585 endlink)));
11586 tree v8qi_ftype_v8qi_int_int
11587 = build_function_type (V8QI_type_node,
11588 tree_cons (NULL_TREE, V8QI_type_node,
11589 tree_cons (NULL_TREE, integer_type_node,
11590 tree_cons (NULL_TREE,
11591 integer_type_node,
11592 endlink))));
11593 tree v4hi_ftype_v4hi_int_int
11594 = build_function_type (V4HI_type_node,
11595 tree_cons (NULL_TREE, V4HI_type_node,
11596 tree_cons (NULL_TREE, integer_type_node,
11597 tree_cons (NULL_TREE,
11598 integer_type_node,
11599 endlink))));
11600 tree v2si_ftype_v2si_int_int
11601 = build_function_type (V2SI_type_node,
11602 tree_cons (NULL_TREE, V2SI_type_node,
11603 tree_cons (NULL_TREE, integer_type_node,
11604 tree_cons (NULL_TREE,
11605 integer_type_node,
11606 endlink))));
11607 /* Miscellaneous. */
11608 tree v8qi_ftype_v4hi_v4hi
11609 = build_function_type (V8QI_type_node,
11610 tree_cons (NULL_TREE, V4HI_type_node,
11611 tree_cons (NULL_TREE, V4HI_type_node,
11612 endlink)));
11613 tree v4hi_ftype_v2si_v2si
11614 = build_function_type (V4HI_type_node,
11615 tree_cons (NULL_TREE, V2SI_type_node,
11616 tree_cons (NULL_TREE, V2SI_type_node,
11617 endlink)));
11618 tree v2si_ftype_v4hi_v4hi
11619 = build_function_type (V2SI_type_node,
11620 tree_cons (NULL_TREE, V4HI_type_node,
11621 tree_cons (NULL_TREE, V4HI_type_node,
11622 endlink)));
11623 tree v2si_ftype_v8qi_v8qi
11624 = build_function_type (V2SI_type_node,
11625 tree_cons (NULL_TREE, V8QI_type_node,
11626 tree_cons (NULL_TREE, V8QI_type_node,
11627 endlink)));
11628 tree v4hi_ftype_v4hi_di
11629 = build_function_type (V4HI_type_node,
11630 tree_cons (NULL_TREE, V4HI_type_node,
11631 tree_cons (NULL_TREE,
11632 long_long_integer_type_node,
11633 endlink)));
11634 tree v2si_ftype_v2si_di
11635 = build_function_type (V2SI_type_node,
11636 tree_cons (NULL_TREE, V2SI_type_node,
11637 tree_cons (NULL_TREE,
11638 long_long_integer_type_node,
11639 endlink)));
11640 tree void_ftype_int_int
11641 = build_function_type (void_type_node,
11642 tree_cons (NULL_TREE, integer_type_node,
11643 tree_cons (NULL_TREE, integer_type_node,
11644 endlink)));
11645 tree di_ftype_void
11646 = build_function_type (long_long_unsigned_type_node, endlink);
11647 tree di_ftype_v8qi
11648 = build_function_type (long_long_integer_type_node,
11649 tree_cons (NULL_TREE, V8QI_type_node,
11650 endlink));
11651 tree di_ftype_v4hi
11652 = build_function_type (long_long_integer_type_node,
11653 tree_cons (NULL_TREE, V4HI_type_node,
11654 endlink));
11655 tree di_ftype_v2si
11656 = build_function_type (long_long_integer_type_node,
11657 tree_cons (NULL_TREE, V2SI_type_node,
11658 endlink));
11659 tree v2si_ftype_v4hi
11660 = build_function_type (V2SI_type_node,
11661 tree_cons (NULL_TREE, V4HI_type_node,
11662 endlink));
11663 tree v4hi_ftype_v8qi
11664 = build_function_type (V4HI_type_node,
11665 tree_cons (NULL_TREE, V8QI_type_node,
11666 endlink));
11668 tree di_ftype_di_v4hi_v4hi
11669 = build_function_type (long_long_unsigned_type_node,
11670 tree_cons (NULL_TREE,
11671 long_long_unsigned_type_node,
11672 tree_cons (NULL_TREE, V4HI_type_node,
11673 tree_cons (NULL_TREE,
11674 V4HI_type_node,
11675 endlink))));
11677 tree di_ftype_v4hi_v4hi
11678 = build_function_type (long_long_unsigned_type_node,
11679 tree_cons (NULL_TREE, V4HI_type_node,
11680 tree_cons (NULL_TREE, V4HI_type_node,
11681 endlink)));
11683 /* Normal vector binops. */
11684 tree v8qi_ftype_v8qi_v8qi
11685 = build_function_type (V8QI_type_node,
11686 tree_cons (NULL_TREE, V8QI_type_node,
11687 tree_cons (NULL_TREE, V8QI_type_node,
11688 endlink)));
11689 tree v4hi_ftype_v4hi_v4hi
11690 = build_function_type (V4HI_type_node,
11691 tree_cons (NULL_TREE, V4HI_type_node,
11692 tree_cons (NULL_TREE, V4HI_type_node,
11693 endlink)));
11694 tree v2si_ftype_v2si_v2si
11695 = build_function_type (V2SI_type_node,
11696 tree_cons (NULL_TREE, V2SI_type_node,
11697 tree_cons (NULL_TREE, V2SI_type_node,
11698 endlink)));
11699 tree di_ftype_di_di
11700 = build_function_type (long_long_unsigned_type_node,
11701 tree_cons (NULL_TREE, long_long_unsigned_type_node,
11702 tree_cons (NULL_TREE,
11703 long_long_unsigned_type_node,
11704 endlink)));
11706 /* Add all builtins that are more or less simple operations on two
11707 operands. */
11708 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
11710 /* Use one of the operands; the target can have a different mode for
11711 mask-generating compares. */
11712 enum machine_mode mode;
11713 tree type;
11715 if (d->name == 0)
11716 continue;
11718 mode = insn_data[d->icode].operand[1].mode;
11720 switch (mode)
11722 case V8QImode:
11723 type = v8qi_ftype_v8qi_v8qi;
11724 break;
11725 case V4HImode:
11726 type = v4hi_ftype_v4hi_v4hi;
11727 break;
11728 case V2SImode:
11729 type = v2si_ftype_v2si_v2si;
11730 break;
11731 case DImode:
11732 type = di_ftype_di_di;
11733 break;
11735 default:
11736 abort ();
11739 def_mbuiltin (d->mask, d->name, type, d->code);
11742 /* Add the remaining MMX insns with somewhat more complicated types. */
11743 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
11744 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
11745 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
11747 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
11748 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
11749 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
11750 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
11751 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
11752 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
11754 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
11755 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
11756 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
11757 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
11758 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
11759 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
11761 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
11762 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
11763 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
11764 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
11765 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
11766 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
11768 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
11769 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
11770 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
11771 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
11772 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
11773 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
11775 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
11777 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
11778 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
11779 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
11780 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
11782 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
11783 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
11784 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
11785 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
11786 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
11787 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
11788 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
11789 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
11790 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
11792 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
11793 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
11794 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
11796 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
11797 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
11798 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
11800 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
11801 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
11802 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
11803 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
11804 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
11805 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
11807 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
11808 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
11809 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
11810 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
11811 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
11812 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
11813 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
11814 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
11815 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
11816 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
11817 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
11818 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
11820 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
11821 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
11822 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
11823 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
11825 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
11826 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
11827 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
11828 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
11829 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
11830 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
11831 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
11834 static void
11835 arm_init_builtins (void)
11837 if (TARGET_REALLY_IWMMXT)
11838 arm_init_iwmmxt_builtins ();
11841 /* Errors in the source file can cause expand_expr to return const0_rtx
11842 where we expect a vector. To avoid crashing, use one of the vector
11843 clear instructions. */
11845 static rtx
11846 safe_vector_operand (rtx x, enum machine_mode mode)
11848 if (x != const0_rtx)
11849 return x;
11850 x = gen_reg_rtx (mode);
11852 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
11853 : gen_rtx_SUBREG (DImode, x, 0)));
11854 return x;
11857 /* Subroutine of arm_expand_builtin to take care of binop insns. */
11859 static rtx
11860 arm_expand_binop_builtin (enum insn_code icode,
11861 tree arglist, rtx target)
11863 rtx pat;
11864 tree arg0 = TREE_VALUE (arglist);
11865 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11866 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11867 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11868 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11869 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11870 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11872 if (VECTOR_MODE_P (mode0))
11873 op0 = safe_vector_operand (op0, mode0);
11874 if (VECTOR_MODE_P (mode1))
11875 op1 = safe_vector_operand (op1, mode1);
11877 if (! target
11878 || GET_MODE (target) != tmode
11879 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11880 target = gen_reg_rtx (tmode);
11882 /* In case the insn wants input operands in modes different from
11883 the result, abort. */
11884 if (GET_MODE (op0) != mode0 || GET_MODE (op1) != mode1)
11885 abort ();
11887 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11888 op0 = copy_to_mode_reg (mode0, op0);
11889 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11890 op1 = copy_to_mode_reg (mode1, op1);
11892 pat = GEN_FCN (icode) (target, op0, op1);
11893 if (! pat)
11894 return 0;
11895 emit_insn (pat);
11896 return target;
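/* Typical use, as in arm_expand_builtin below -- each two-operand iWMMXt
   builtin is dispatched straight through its insn code, e.g.:

     return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);

   The predicate checks above then force any operand that does not already
   satisfy the insn pattern into a fresh register of the required mode.  */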
11899 /* Subroutine of arm_expand_builtin to take care of unop insns. */
11901 static rtx
11902 arm_expand_unop_builtin (enum insn_code icode,
11903 tree arglist, rtx target, int do_load)
11905 rtx pat;
11906 tree arg0 = TREE_VALUE (arglist);
11907 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11908 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11909 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11911 if (! target
11912 || GET_MODE (target) != tmode
11913 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11914 target = gen_reg_rtx (tmode);
11915 if (do_load)
11916 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
11917 else
11919 if (VECTOR_MODE_P (mode0))
11920 op0 = safe_vector_operand (op0, mode0);
11922 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11923 op0 = copy_to_mode_reg (mode0, op0);
11926 pat = GEN_FCN (icode) (target, op0);
11927 if (! pat)
11928 return 0;
11929 emit_insn (pat);
11930 return target;
11933 /* Expand an expression EXP that calls a built-in function,
11934 with result going to TARGET if that's convenient
11935 (and in mode MODE if that's convenient).
11936 SUBTARGET may be used as the target for computing one of EXP's operands.
11937 IGNORE is nonzero if the value is to be ignored. */
11939 static rtx
11940 arm_expand_builtin (tree exp,
11941 rtx target,
11942 rtx subtarget ATTRIBUTE_UNUSED,
11943 enum machine_mode mode ATTRIBUTE_UNUSED,
11944 int ignore ATTRIBUTE_UNUSED)
11946 const struct builtin_description * d;
11947 enum insn_code icode;
11948 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
11949 tree arglist = TREE_OPERAND (exp, 1);
11950 tree arg0;
11951 tree arg1;
11952 tree arg2;
11953 rtx op0;
11954 rtx op1;
11955 rtx op2;
11956 rtx pat;
11957 int fcode = DECL_FUNCTION_CODE (fndecl);
11958 size_t i;
11959 enum machine_mode tmode;
11960 enum machine_mode mode0;
11961 enum machine_mode mode1;
11962 enum machine_mode mode2;
11964 switch (fcode)
11966 case ARM_BUILTIN_TEXTRMSB:
11967 case ARM_BUILTIN_TEXTRMUB:
11968 case ARM_BUILTIN_TEXTRMSH:
11969 case ARM_BUILTIN_TEXTRMUH:
11970 case ARM_BUILTIN_TEXTRMSW:
11971 case ARM_BUILTIN_TEXTRMUW:
11972 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
11973 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
11974 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
11975 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
11976 : CODE_FOR_iwmmxt_textrmw);
11978 arg0 = TREE_VALUE (arglist);
11979 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
11980 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
11981 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
11982 tmode = insn_data[icode].operand[0].mode;
11983 mode0 = insn_data[icode].operand[1].mode;
11984 mode1 = insn_data[icode].operand[2].mode;
11986 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11987 op0 = copy_to_mode_reg (mode0, op0);
11988 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11990 /* @@@ better error message */
11991 error ("selector must be an immediate");
11992 return gen_reg_rtx (tmode);
11994 if (target == 0
11995 || GET_MODE (target) != tmode
11996 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11997 target = gen_reg_rtx (tmode);
11998 pat = GEN_FCN (icode) (target, op0, op1);
11999 if (! pat)
12000 return 0;
12001 emit_insn (pat);
12002 return target;
12004 case ARM_BUILTIN_TINSRB:
12005 case ARM_BUILTIN_TINSRH:
12006 case ARM_BUILTIN_TINSRW:
12007 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
12008 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
12009 : CODE_FOR_iwmmxt_tinsrw);
12010 arg0 = TREE_VALUE (arglist);
12011 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12012 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12013 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12014 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12015 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12016 tmode = insn_data[icode].operand[0].mode;
12017 mode0 = insn_data[icode].operand[1].mode;
12018 mode1 = insn_data[icode].operand[2].mode;
12019 mode2 = insn_data[icode].operand[3].mode;
12021 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12022 op0 = copy_to_mode_reg (mode0, op0);
12023 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12024 op1 = copy_to_mode_reg (mode1, op1);
12025 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12027 /* @@@ better error message */
12028 error ("selector must be an immediate");
12029 return const0_rtx;
12031 if (target == 0
12032 || GET_MODE (target) != tmode
12033 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12034 target = gen_reg_rtx (tmode);
12035 pat = GEN_FCN (icode) (target, op0, op1, op2);
12036 if (! pat)
12037 return 0;
12038 emit_insn (pat);
12039 return target;
12041 case ARM_BUILTIN_SETWCX:
12042 arg0 = TREE_VALUE (arglist);
12043 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12044 op0 = force_reg (SImode, expand_expr (arg0, NULL_RTX, VOIDmode, 0));
12045 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12046 emit_insn (gen_iwmmxt_tmcr (op1, op0));
12047 return 0;
12049 case ARM_BUILTIN_GETWCX:
12050 arg0 = TREE_VALUE (arglist);
12051 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12052 target = gen_reg_rtx (SImode);
12053 emit_insn (gen_iwmmxt_tmrc (target, op0));
12054 return target;
12056 case ARM_BUILTIN_WSHUFH:
12057 icode = CODE_FOR_iwmmxt_wshufh;
12058 arg0 = TREE_VALUE (arglist);
12059 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12060 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12061 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12062 tmode = insn_data[icode].operand[0].mode;
12063 mode1 = insn_data[icode].operand[1].mode;
12064 mode2 = insn_data[icode].operand[2].mode;
12066 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
12067 op0 = copy_to_mode_reg (mode1, op0);
12068 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
12070 /* @@@ better error message */
12071 error ("mask must be an immediate");
12072 return const0_rtx;
12074 if (target == 0
12075 || GET_MODE (target) != tmode
12076 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12077 target = gen_reg_rtx (tmode);
12078 pat = GEN_FCN (icode) (target, op0, op1);
12079 if (! pat)
12080 return 0;
12081 emit_insn (pat);
12082 return target;
12084 case ARM_BUILTIN_WSADB:
12085 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, arglist, target);
12086 case ARM_BUILTIN_WSADH:
12087 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, arglist, target);
12088 case ARM_BUILTIN_WSADBZ:
12089 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, arglist, target);
12090 case ARM_BUILTIN_WSADHZ:
12091 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, arglist, target);
12093 /* Several three-argument builtins. */
12094 case ARM_BUILTIN_WMACS:
12095 case ARM_BUILTIN_WMACU:
12096 case ARM_BUILTIN_WALIGN:
12097 case ARM_BUILTIN_TMIA:
12098 case ARM_BUILTIN_TMIAPH:
12099 case ARM_BUILTIN_TMIATT:
12100 case ARM_BUILTIN_TMIATB:
12101 case ARM_BUILTIN_TMIABT:
12102 case ARM_BUILTIN_TMIABB:
12103 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
12104 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
12105 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
12106 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
12107 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
12108 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
12109 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
12110 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
12111 : CODE_FOR_iwmmxt_walign);
12112 arg0 = TREE_VALUE (arglist);
12113 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12114 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
12115 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12116 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12117 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
12118 tmode = insn_data[icode].operand[0].mode;
12119 mode0 = insn_data[icode].operand[1].mode;
12120 mode1 = insn_data[icode].operand[2].mode;
12121 mode2 = insn_data[icode].operand[3].mode;
12123 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12124 op0 = copy_to_mode_reg (mode0, op0);
12125 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12126 op1 = copy_to_mode_reg (mode1, op1);
12127 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12128 op2 = copy_to_mode_reg (mode2, op2);
12129 if (target == 0
12130 || GET_MODE (target) != tmode
12131 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12132 target = gen_reg_rtx (tmode);
12133 pat = GEN_FCN (icode) (target, op0, op1, op2);
12134 if (! pat)
12135 return 0;
12136 emit_insn (pat);
12137 return target;
12139 case ARM_BUILTIN_WZERO:
12140 target = gen_reg_rtx (DImode);
12141 emit_insn (gen_iwmmxt_clrdi (target));
12142 return target;
12144 default:
12145 break;
12148 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12149 if (d->code == (const enum arm_builtins) fcode)
12150 return arm_expand_binop_builtin (d->icode, arglist, target);
12152 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
12153 if (d->code == (const enum arm_builtins) fcode)
12154 return arm_expand_unop_builtin (d->icode, arglist, target, 0);
12156 /* @@@ Should really do something sensible here. */
12157 return NULL_RTX;
12160 /* Recursively search through all of the blocks in a function
12161 checking to see if any of the variables created in that
12162 function match the RTX called 'orig'. If they do then
12163 replace them with the RTX called 'new'. */
12164 static void
12165 replace_symbols_in_block (tree block, rtx orig, rtx new)
12167 for (; block; block = BLOCK_CHAIN (block))
12169 tree sym;
12171 if (!TREE_USED (block))
12172 continue;
12174 for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
12176 if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
12177 || DECL_IGNORED_P (sym)
12178 || TREE_CODE (sym) != VAR_DECL
12179 || DECL_EXTERNAL (sym)
12180 || !rtx_equal_p (DECL_RTL (sym), orig)
12182 continue;
12184 SET_DECL_RTL (sym, new);
12187 replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
12191 /* Return the number (counting from 0) of
12192 the least significant set bit in MASK. */
12194 inline static int
12195 number_of_first_bit_set (int mask)
12197 int bit;
12199 for (bit = 0;
12200 (mask & (1 << bit)) == 0;
12201 ++bit)
12202 continue;
12204 return bit;
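/* A standalone sketch (not part of this file) of the same computation
   using the two's-complement identity MASK & -MASK, which isolates the
   least significant set bit; the loop above is equivalent.
   E.g. number_of_first_bit_set (0x28) == 3, since 0x28 == binary 101000.  */
#if 0
static int
lsb_index_sketch (unsigned int mask)
{
  unsigned int lsb = mask & -mask;	/* 0x28 & -0x28 == 0x08.  */
  int bit = 0;

  while (lsb >>= 1)
    bit++;

  return bit;				/* 3 for mask == 0x28.  */
}
#endif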
12207 /* Generate code to return from a thumb function.
12208 If 'reg_containing_return_addr' is -1, then the return address is
12209 actually on the stack, at the stack pointer. */
12210 static void
12211 thumb_exit (FILE *f, int reg_containing_return_addr, rtx eh_ofs)
12213 unsigned regs_available_for_popping;
12214 unsigned regs_to_pop;
12215 int pops_needed;
12216 unsigned available;
12217 unsigned required;
12218 int mode;
12219 int size;
12220 int restore_a4 = FALSE;
12222 /* Compute the registers we need to pop. */
12223 regs_to_pop = 0;
12224 pops_needed = 0;
12226 /* There is an assumption here, that if eh_ofs is not NULL, the
12227 normal return address will have been pushed. */
12228 if (reg_containing_return_addr == -1 || eh_ofs)
12230 /* When we are generating a return for __builtin_eh_return,
12231 reg_containing_return_addr must specify the return regno. */
12232 if (eh_ofs && reg_containing_return_addr == -1)
12233 abort ();
12235 regs_to_pop |= 1 << LR_REGNUM;
12236 ++pops_needed;
12239 if (TARGET_BACKTRACE)
12241 /* Restore the (ARM) frame pointer and stack pointer. */
12242 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
12243 pops_needed += 2;
12246 /* If there is nothing to pop then just emit the BX instruction and
12247 return. */
12248 if (pops_needed == 0)
12250 if (eh_ofs)
12251 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12253 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12254 return;
12256 /* Otherwise if we are not supporting interworking and we have not created
12257 a backtrace structure and the function was not entered in ARM mode then
12258 just pop the return address straight into the PC. */
12259 else if (!TARGET_INTERWORK
12260 && !TARGET_BACKTRACE
12261 && !is_called_in_ARM_mode (current_function_decl))
12263 if (eh_ofs)
12265 asm_fprintf (f, "\tadd\t%r, #4\n", SP_REGNUM);
12266 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12267 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12269 else
12270 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
12272 return;
12275 /* Find out how many of the (return) argument registers we can corrupt. */
12276 regs_available_for_popping = 0;
12278 /* If returning via __builtin_eh_return, the bottom three registers
12279 all contain information needed for the return. */
12280 if (eh_ofs)
12281 size = 12;
12282 else
12284 #ifdef RTX_CODE
12285 /* See if we can deduce the registers used from the function's
12286 return value. This is more reliable than examining
12287 regs_ever_live[] because that will be set if the register is
12288 ever used in the function, not just if the register is used
12289 to hold a return value. */
12291 if (current_function_return_rtx != 0)
12292 mode = GET_MODE (current_function_return_rtx);
12293 else
12294 #endif
12295 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12297 size = GET_MODE_SIZE (mode);
12299 if (size == 0)
12301 /* In a void function we can use any argument register.
12302 In a function that returns a structure on the stack
12303 we can use the second and third argument registers. */
12304 if (mode == VOIDmode)
12305 regs_available_for_popping =
12306 (1 << ARG_REGISTER (1))
12307 | (1 << ARG_REGISTER (2))
12308 | (1 << ARG_REGISTER (3));
12309 else
12310 regs_available_for_popping =
12311 (1 << ARG_REGISTER (2))
12312 | (1 << ARG_REGISTER (3));
12314 else if (size <= 4)
12315 regs_available_for_popping =
12316 (1 << ARG_REGISTER (2))
12317 | (1 << ARG_REGISTER (3));
12318 else if (size <= 8)
12319 regs_available_for_popping =
12320 (1 << ARG_REGISTER (3));
12323 /* Match registers to be popped with registers into which we pop them. */
12324 for (available = regs_available_for_popping,
12325 required = regs_to_pop;
12326 required != 0 && available != 0;
12327 available &= ~(available & - available),
12328 required &= ~(required & - required))
12329 -- pops_needed;
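/* Note: AVAILABLE & -AVAILABLE isolates the least significant set bit,
   so each pass of the loop above pairs off one register still to be
   popped with one register it can be popped into; whatever is left in
   POPS_NEEDED could not be matched.  */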
12331 /* If we have any popping registers left over, remove them. */
12332 if (available > 0)
12333 regs_available_for_popping &= ~available;
12335 /* Otherwise if we need another popping register we can use
12336 the fourth argument register. */
12337 else if (pops_needed)
12339 /* If we have not found any free argument registers and
12340 reg a4 contains the return address, we must move it. */
12341 if (regs_available_for_popping == 0
12342 && reg_containing_return_addr == LAST_ARG_REGNUM)
12344 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12345 reg_containing_return_addr = LR_REGNUM;
12347 else if (size > 12)
12349 /* Register a4 is being used to hold part of the return value,
12350 but we have dire need of a free, low register. */
12351 restore_a4 = TRUE;
12353 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
12356 if (reg_containing_return_addr != LAST_ARG_REGNUM)
12358 /* The fourth argument register is available. */
12359 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
12361 --pops_needed;
12365 /* Pop as many registers as we can. */
12366 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12367 regs_available_for_popping);
12369 /* Process the registers we popped. */
12370 if (reg_containing_return_addr == -1)
12372 /* The return address was popped into the lowest numbered register. */
12373 regs_to_pop &= ~(1 << LR_REGNUM);
12375 reg_containing_return_addr =
12376 number_of_first_bit_set (regs_available_for_popping);
12378 /* Remove this register from the mask of available registers, so that
12379 the return address will not be corrupted by further pops. */
12380 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
12383 /* If we popped other registers then handle them here. */
12384 if (regs_available_for_popping)
12386 int frame_pointer;
12388 /* Work out which register currently contains the frame pointer. */
12389 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
12391 /* Move it into the correct place. */
12392 asm_fprintf (f, "\tmov\t%r, %r\n",
12393 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
12395 /* (Temporarily) remove it from the mask of popped registers. */
12396 regs_available_for_popping &= ~(1 << frame_pointer);
12397 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
12399 if (regs_available_for_popping)
12401 int stack_pointer;
12403 /* We popped the stack pointer as well,
12404 find the register that contains it. */
12405 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
12407 /* Move it into the stack register. */
12408 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
12410 /* At this point we have popped all necessary registers, so
12411 do not worry about restoring regs_available_for_popping
12412 to its correct value:
12414 assert (pops_needed == 0)
12415 assert (regs_available_for_popping == (1 << frame_pointer))
12416 assert (regs_to_pop == (1 << STACK_POINTER)) */
12418 else
12420 /* Since we have just moved the popped value into the frame
12421 pointer, the popping register is available for reuse, and
12422 we know that we still have the stack pointer left to pop. */
12423 regs_available_for_popping |= (1 << frame_pointer);
12427 /* If we still have registers left on the stack, but we no longer have
12428 any registers into which we can pop them, then we must move the return
12429 address into the link register and make available the register that
12430 contained it. */
12431 if (regs_available_for_popping == 0 && pops_needed > 0)
12433 regs_available_for_popping |= 1 << reg_containing_return_addr;
12435 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
12436 reg_containing_return_addr);
12438 reg_containing_return_addr = LR_REGNUM;
12441 /* If we have registers left on the stack then pop some more.
12442 We know that at most we will want to pop FP and SP. */
12443 if (pops_needed > 0)
12445 int popped_into;
12446 int move_to;
12448 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12449 regs_available_for_popping);
12451 /* We have popped either FP or SP.
12452 Move whichever one it is into the correct register. */
12453 popped_into = number_of_first_bit_set (regs_available_for_popping);
12454 move_to = number_of_first_bit_set (regs_to_pop);
12456 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
12458 regs_to_pop &= ~(1 << move_to);
12460 --pops_needed;
12463 /* If we still have not popped everything then we must have only
12464 had one register available to us and we are now popping the SP. */
12465 if (pops_needed > 0)
12467 int popped_into;
12469 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
12470 regs_available_for_popping);
12472 popped_into = number_of_first_bit_set (regs_available_for_popping);
12474 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
12476 /* assert (regs_to_pop == (1 << STACK_POINTER))
12477 assert (pops_needed == 1) */
12481 /* If necessary restore the a4 register. */
12482 if (restore_a4)
12484 if (reg_containing_return_addr != LR_REGNUM)
12486 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
12487 reg_containing_return_addr = LR_REGNUM;
12490 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
12493 if (eh_ofs)
12494 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, REGNO (eh_ofs));
12496 /* Return to caller. */
12497 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
12500 /* Emit code to push or pop registers to or from the stack. F is the
12501 assembly file. MASK is the registers to push or pop. PUSH is
12502 nonzero if we should push, and zero if we should pop. For debugging
12503 output, if pushing, adjust CFA_OFFSET by the amount of space added
12504 to the stack. REAL_REGS should have the same number of bits set as
12505 MASK, and will be used instead (in the same order) to describe which
12506 registers were saved - this is used to mark the save slots when we
12507 push high registers after moving them to low registers. */
12508 static void
12509 thumb_pushpop (FILE *f, int mask, int push, int *cfa_offset, int real_regs)
12511 int regno;
12512 int lo_mask = mask & 0xFF;
12513 int pushed_words = 0;
12515 if (lo_mask == 0 && !push && (mask & (1 << 15)))
12517 /* Special case. Do not generate a POP PC statement here; do it in
12518 thumb_exit (). */
12519 thumb_exit (f, -1, NULL_RTX);
12520 return;
12523 fprintf (f, "\t%s\t{", push ? "push" : "pop");
12525 /* Look at the low registers first. */
12526 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
12528 if (lo_mask & 1)
12530 asm_fprintf (f, "%r", regno);
12532 if ((lo_mask & ~1) != 0)
12533 fprintf (f, ", ");
12535 pushed_words++;
12539 if (push && (mask & (1 << LR_REGNUM)))
12541 /* Catch pushing the LR. */
12542 if (mask & 0xFF)
12543 fprintf (f, ", ");
12545 asm_fprintf (f, "%r", LR_REGNUM);
12547 pushed_words++;
12549 else if (!push && (mask & (1 << PC_REGNUM)))
12551 /* Catch popping the PC. */
12552 if (TARGET_INTERWORK || TARGET_BACKTRACE)
12554 /* The PC is never popped directly; instead
12555 it is popped into r3 and then BX is used. */
12556 fprintf (f, "}\n");
12558 thumb_exit (f, -1, NULL_RTX);
12560 return;
12562 else
12564 if (mask & 0xFF)
12565 fprintf (f, ", ");
12567 asm_fprintf (f, "%r", PC_REGNUM);
12571 fprintf (f, "}\n");
12573 if (push && pushed_words && dwarf2out_do_frame ())
12575 char *l = dwarf2out_cfi_label ();
12576 int pushed_mask = real_regs;
12578 *cfa_offset += pushed_words * 4;
12579 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
12581 pushed_words = 0;
12582 pushed_mask = real_regs;
12583 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
12585 if (pushed_mask & 1)
12586 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
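/* Illustrative only: a call such as
     thumb_pushpop (f, 0x90 | (1 << LR_REGNUM), 1, &offset, ...)
   emits
	push	{r4, r7, lr}
   and, when DWARF frame info is wanted, CFA notes describing the twelve
   bytes just pushed.  The exact register list depends on the caller.  */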
12591 void
12592 thumb_final_prescan_insn (rtx insn)
12594 if (flag_print_asm_name)
12595 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
12596 INSN_ADDRESSES (INSN_UID (insn)));
12599 int
12600 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
12602 unsigned HOST_WIDE_INT mask = 0xff;
12603 int i;
12605 if (val == 0) /* XXX */
12606 return 0;
12608 for (i = 0; i < 25; i++)
12609 if ((val & (mask << i)) == val)
12610 return 1;
12612 return 0;
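/* Worked examples, for a 32-bit value:
     thumb_shiftable_const (0xff)       == 1   (0xff << 0)
     thumb_shiftable_const (0xff00)     == 1   (0xff << 8)
     thumb_shiftable_const (0x1fe00000) == 1   (0xff << 21)
     thumb_shiftable_const (0x101)      == 0   (set bits span more than 8 bits)
   The loop stops at i == 24 because 0xff << 24 is the last byte-wide
   window inside a 32-bit word.  */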
12615 /* Returns nonzero if the current function contains,
12616 or might contain, a far jump. */
12617 int
12618 thumb_far_jump_used_p (int in_prologue)
12620 rtx insn;
12622 /* This test is only important for leaf functions. */
12623 /* assert (!leaf_function_p ()); */
12625 /* If we have already decided that far jumps may be used,
12626 do not bother checking again, and always return true even if
12627 it turns out that they are not being used. Once we have made
12628 the decision that far jumps are present (and that hence the link
12629 register will be pushed onto the stack) we cannot go back on it. */
12630 if (cfun->machine->far_jump_used)
12631 return 1;
12633 /* If this function is not being called from the prologue/epilogue
12634 generation code then it must be being called from the
12635 INITIAL_ELIMINATION_OFFSET macro. */
12636 if (!in_prologue)
12638 /* In this case we know that we are being asked about the elimination
12639 of the arg pointer register. If that register is not being used,
12640 then there are no arguments on the stack, and we do not have to
12641 worry that a far jump might force the prologue to push the link
12642 register, changing the stack offsets. In this case we can just
12643 return false, since the presence of far jumps in the function will
12644 not affect stack offsets.
12646 If the arg pointer is live (or if it was live, but has now been
12647 eliminated and so set to dead) then we do have to test to see if
12648 the function might contain a far jump. This test can lead to some
12649 false positives, since before reload is completed the length of
12650 branch instructions is not known, so gcc defaults to returning their
12651 longest length, which in turn sets the far jump attribute to true.
12653 A false positive will not result in bad code being generated, but it
12654 will result in a needless push and pop of the link register. We
12655 hope that this does not occur too often. */
12656 if (regs_ever_live [ARG_POINTER_REGNUM])
12657 cfun->machine->arg_pointer_live = 1;
12658 else if (!cfun->machine->arg_pointer_live)
12659 return 0;
12662 /* Check to see if the function contains a branch
12663 insn with the far jump attribute set. */
12664 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12666 if (GET_CODE (insn) == JUMP_INSN
12667 /* Ignore tablejump patterns. */
12668 && GET_CODE (PATTERN (insn)) != ADDR_VEC
12669 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
12670 && get_attr_far_jump (insn) == FAR_JUMP_YES
12673 /* Record the fact that we have decided that
12674 the function does use far jumps. */
12675 cfun->machine->far_jump_used = 1;
12676 return 1;
12680 return 0;
12683 /* Return nonzero if FUNC must be entered in ARM mode. */
12686 int
12687 is_called_in_ARM_mode (tree func)
12687 if (TREE_CODE (func) != FUNCTION_DECL)
12688 abort ();
12690 /* Ignore the problem about functions whose address is taken. */
12691 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
12692 return TRUE;
12694 #ifdef ARM_PE
12695 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
12696 #else
12697 return FALSE;
12698 #endif
12701 /* The bits which aren't usefully expanded as rtl. */
12702 const char *
12703 thumb_unexpanded_epilogue (void)
12705 int regno;
12706 int live_regs_mask = 0;
12707 int high_regs_pushed = 0;
12708 int leaf_function = leaf_function_p ();
12709 int had_to_push_lr;
12710 rtx eh_ofs = cfun->machine->eh_epilogue_sp_ofs;
12712 if (return_used_this_function)
12713 return "";
12715 if (IS_NAKED (arm_current_func_type ()))
12716 return "";
12718 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12719 if (THUMB_REG_PUSHED_P (regno))
12720 live_regs_mask |= 1 << regno;
12722 for (regno = 8; regno < 13; regno++)
12723 if (THUMB_REG_PUSHED_P (regno))
12724 high_regs_pushed++;
12726 /* The prologue may have pushed some high registers to use as
12727 work registers, e.g. the testsuite file:
12728 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12729 compiles to produce:
12730 push {r4, r5, r6, r7, lr}
12731 mov r7, r9
12732 mov r6, r8
12733 push {r6, r7}
12734 as part of the prologue. We have to undo that pushing here. */
12736 if (high_regs_pushed)
12738 int mask = live_regs_mask;
12739 int next_hi_reg;
12740 int size;
12741 int mode;
12743 #ifdef RTX_CODE
12744 /* See if we can deduce the registers used from the function's return value.
12745 This is more reliable than examining regs_ever_live[] because that
12746 will be set if the register is ever used in the function, not just if
12747 the register is used to hold a return value. */
12749 if (current_function_return_rtx != 0)
12750 mode = GET_MODE (current_function_return_rtx);
12751 else
12752 #endif
12753 mode = DECL_MODE (DECL_RESULT (current_function_decl));
12755 size = GET_MODE_SIZE (mode);
12757 /* Unless we are returning a type of size > 12, register r3 is
12758 available. */
12759 if (size < 13)
12760 mask |= 1 << 3;
12762 if (mask == 0)
12763 /* Oh dear! We have no low registers into which we can pop
12764 high registers! */
12765 internal_error
12766 ("no low registers available for popping high registers");
12768 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
12769 if (THUMB_REG_PUSHED_P (next_hi_reg))
12770 break;
12772 while (high_regs_pushed)
12774 /* Find lo register(s) into which the high register(s) can
12775 be popped. */
12776 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12778 if (mask & (1 << regno))
12779 high_regs_pushed--;
12780 if (high_regs_pushed == 0)
12781 break;
12784 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
12786 /* Pop the values into the low register(s). */
12787 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
12789 /* Move the value(s) into the high registers. */
12790 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12792 if (mask & (1 << regno))
12794 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
12795 regno);
12797 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
12798 if (THUMB_REG_PUSHED_P (next_hi_reg))
12799 break;
12805 had_to_push_lr = (live_regs_mask || !leaf_function
12806 || thumb_far_jump_used_p (1));
12808 if (TARGET_BACKTRACE
12809 && ((live_regs_mask & 0xFF) == 0)
12810 && regs_ever_live [LAST_ARG_REGNUM] != 0)
12812 /* The stack backtrace structure creation code had to
12813 push R7 in order to get a work register, so we pop
12814 it now. */
12815 live_regs_mask |= (1 << LAST_LO_REGNUM);
12818 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
12820 if (had_to_push_lr
12821 && !is_called_in_ARM_mode (current_function_decl)
12822 && !eh_ofs)
12823 live_regs_mask |= 1 << PC_REGNUM;
12825 /* Either no argument registers were pushed or a backtrace
12826 structure was created which includes an adjusted stack
12827 pointer, so just pop everything. */
12828 if (live_regs_mask)
12829 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12830 live_regs_mask);
12832 if (eh_ofs)
12833 thumb_exit (asm_out_file, 2, eh_ofs);
12834 /* We have either just popped the return address into the
12835 PC, or it was kept in LR for the entire function, or
12836 it is still on the stack because we do not want to
12837 return by doing a pop {pc}. */
12838 else if ((live_regs_mask & (1 << PC_REGNUM)) == 0)
12839 thumb_exit (asm_out_file,
12840 (had_to_push_lr
12841 && is_called_in_ARM_mode (current_function_decl)) ?
12842 -1 : LR_REGNUM, NULL_RTX);
12844 else
12846 /* Pop everything but the return address. */
12847 live_regs_mask &= ~(1 << PC_REGNUM);
12849 if (live_regs_mask)
12850 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
12851 live_regs_mask);
12853 if (had_to_push_lr)
12854 /* Get the return address into a temporary register. */
12855 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
12856 1 << LAST_ARG_REGNUM);
12858 /* Remove the argument registers that were pushed onto the stack. */
12859 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
12860 SP_REGNUM, SP_REGNUM,
12861 current_function_pretend_args_size);
12863 if (eh_ofs)
12864 thumb_exit (asm_out_file, 2, eh_ofs);
12865 else
12866 thumb_exit (asm_out_file,
12867 had_to_push_lr ? LAST_ARG_REGNUM : LR_REGNUM, NULL_RTX);
12870 return "";
12873 /* Functions to save and restore machine-specific function data. */
12874 static struct machine_function *
12875 arm_init_machine_status (void)
12877 struct machine_function *machine;
12878 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
12880 #if ARM_FT_UNKNOWN != 0
12881 machine->func_type = ARM_FT_UNKNOWN;
12882 #endif
12883 return machine;
12886 /* Return an RTX indicating where the return address to the
12887 calling function can be found. */
12888 rtx
12889 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
12891 if (count != 0)
12892 return NULL_RTX;
12894 if (TARGET_APCS_32)
12895 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
12896 else
12898 rtx lr = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
12899 GEN_INT (RETURN_ADDR_MASK26));
12900 return get_func_hard_reg_initial_val (cfun, lr);
12904 /* Do anything needed before RTL is emitted for each function. */
12905 void
12906 arm_init_expanders (void)
12908 /* Arrange to initialize and mark the machine per-function status. */
12909 init_machine_status = arm_init_machine_status;
12912 HOST_WIDE_INT
12913 thumb_get_frame_size (void)
12915 int regno;
12917 int base_size = ROUND_UP_WORD (get_frame_size ());
12918 int count_regs = 0;
12919 int entry_size = 0;
12920 int leaf;
12922 if (! TARGET_THUMB)
12923 abort ();
12925 if (! TARGET_ATPCS)
12926 return base_size;
12928 /* We need to know if we are a leaf function. Unfortunately, it
12929 is possible to be called after start_sequence has been called,
12930 which causes get_insns to return the insns for the sequence,
12931 not the function, which will cause leaf_function_p to return
12932 the incorrect result.
12934 To work around this, we cache the computed frame size. This
12935 works because we will only be calling RTL expanders that need
12936 to know about leaf functions once reload has completed, and the
12937 frame size cannot be changed after that time, so we can safely
12938 use the cached value. */
12940 if (reload_completed)
12941 return cfun->machine->frame_size;
12943 leaf = leaf_function_p ();
12945 /* A leaf function does not need any stack alignment if it has nothing
12946 on the stack. */
12947 if (leaf && base_size == 0)
12949 cfun->machine->frame_size = 0;
12950 return 0;
12953 /* We know that SP will be word aligned on entry, and we must
12954 preserve that condition at any subroutine call. But those are
12955 the only constraints. */
12957 /* Space for variadic functions. */
12958 if (current_function_pretend_args_size)
12959 entry_size += current_function_pretend_args_size;
12961 /* Space for pushed lo registers. */
12962 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
12963 if (THUMB_REG_PUSHED_P (regno))
12964 count_regs++;
12966 /* Space for backtrace structure. */
12967 if (TARGET_BACKTRACE)
12969 if (count_regs == 0 && regs_ever_live[LAST_ARG_REGNUM] != 0)
12970 entry_size += 20;
12971 else
12972 entry_size += 16;
12975 if (count_regs || !leaf || thumb_far_jump_used_p (1))
12976 count_regs++; /* LR */
12978 entry_size += count_regs * 4;
12979 count_regs = 0;
12981 /* Space for pushed hi regs. */
12982 for (regno = 8; regno < 13; regno++)
12983 if (THUMB_REG_PUSHED_P (regno))
12984 count_regs++;
12986 entry_size += count_regs * 4;
12988 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12989 base_size += 4;
12990 if ((entry_size + base_size + current_function_outgoing_args_size) & 7)
12991 abort ();
12993 cfun->machine->frame_size = base_size;
12995 return base_size;
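/* A worked example of the ATPCS sizing above: a non-leaf function that
   pushes r4-r6 saves four words with LR (entry_size == 16) and has
   base_size == 12 of locals with no outgoing args; 16 + 12 == 28 is not
   8-byte aligned, so base_size is bumped by 4 and the cached frame size
   becomes 16.  */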
12998 /* Generate the rest of a function's prologue. */
12999 void
13000 thumb_expand_prologue (void)
13002 rtx insn, dwarf;
13004 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13005 + current_function_outgoing_args_size);
13006 unsigned long func_type;
13008 func_type = arm_current_func_type ();
13010 /* Naked functions don't have prologues. */
13011 if (IS_NAKED (func_type))
13012 return;
13014 if (IS_INTERRUPT (func_type))
13016 error ("interrupt service routines cannot be coded in Thumb mode");
13017 return;
13020 if (frame_pointer_needed)
13022 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx, stack_pointer_rtx));
13023 RTX_FRAME_RELATED_P (insn) = 1;
13026 if (amount)
13028 amount = ROUND_UP_WORD (amount);
13030 if (amount < 512)
13032 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13033 GEN_INT (- amount)));
13034 RTX_FRAME_RELATED_P (insn) = 1;
13036 else
13038 int regno;
13039 rtx reg;
13041 /* The stack decrement is too big for an immediate value in a single
13042 insn. In theory we could issue multiple subtracts, but after
13043 three of them it becomes more space efficient to place the full
13044 value in the constant pool and load into a register. (Also the
13045 ARM debugger really likes to see only one stack decrement per
13046 function). So instead we look for a scratch register into which
13047 we can load the decrement, and then we subtract this from the
13048 stack pointer. Unfortunately on the thumb the only available
13049 scratch registers are the argument registers, and we cannot use
13050 these as they may hold arguments to the function. Instead we
13051 attempt to locate a call preserved register which is used by this
13052 function. If we can find one, then we know that it will have
13053 been pushed at the start of the prologue and so we can corrupt
13054 it now. */
13055 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
13056 if (THUMB_REG_PUSHED_P (regno)
13057 && !(frame_pointer_needed
13058 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
13059 break;
13061 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
13063 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
13065 /* Choose an arbitrary, non-argument low register. */
13066 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
13068 /* Save it by copying it into a high, scratch register. */
13069 emit_insn (gen_movsi (spare, reg));
13070 /* Add a USE to stop propagate_one_insn() from barfing. */
13071 emit_insn (gen_prologue_use (spare));
13073 /* Decrement the stack. */
13074 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13075 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13076 stack_pointer_rtx, reg));
13077 RTX_FRAME_RELATED_P (insn) = 1;
13078 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13079 plus_constant (stack_pointer_rtx,
13080 - amount));
13081 RTX_FRAME_RELATED_P (dwarf) = 1;
13082 REG_NOTES (insn)
13083 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13084 REG_NOTES (insn));
13086 /* Restore the low register's original value. */
13087 emit_insn (gen_movsi (reg, spare));
13089 /* Emit a USE of the restored scratch register, so that flow
13090 analysis will not consider the restore redundant. The
13091 register won't be used again in this function and isn't
13092 restored by the epilogue. */
13093 emit_insn (gen_prologue_use (reg));
13095 else
13097 reg = gen_rtx_REG (SImode, regno);
13099 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
13101 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
13102 stack_pointer_rtx, reg));
13103 RTX_FRAME_RELATED_P (insn) = 1;
13104 dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
13105 plus_constant (stack_pointer_rtx,
13106 - amount));
13107 RTX_FRAME_RELATED_P (dwarf) = 1;
13108 REG_NOTES (insn)
13109 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
13110 REG_NOTES (insn));
13115 if (current_function_profile || TARGET_NO_SCHED_PRO)
13116 emit_insn (gen_blockage ());
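/* Illustrative only: for a frame of, say, 1024 bytes the large-decrement
   path above amounts to
	ldr	rN, .Lc		@ .Lc in the literal pool holds -1024
	add	sp, sp, rN
   where rN is a call-saved low register already pushed by the prologue,
   and the REG_FRAME_RELATED_EXPR note lets the unwinder see it as the
   simple adjustment sp = sp - 1024.  */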
13119 void
13120 thumb_expand_epilogue (void)
13122 HOST_WIDE_INT amount = (thumb_get_frame_size ()
13123 + current_function_outgoing_args_size);
13124 int regno;
13126 /* Naked functions don't have epilogues. */
13127 if (IS_NAKED (arm_current_func_type ()))
13128 return;
13130 if (frame_pointer_needed)
13131 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
13132 else if (amount)
13134 amount = ROUND_UP_WORD (amount);
13136 if (amount < 512)
13137 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
13138 GEN_INT (amount)));
13139 else
13141 /* r3 is always free in the epilogue. */
13142 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
13144 emit_insn (gen_movsi (reg, GEN_INT (amount)));
13145 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
13149 /* Emit a USE (stack_pointer_rtx), so that
13150 the stack adjustment will not be deleted. */
13151 emit_insn (gen_prologue_use (stack_pointer_rtx));
13153 if (current_function_profile || TARGET_NO_SCHED_PRO)
13154 emit_insn (gen_blockage ());
13156 /* Emit a clobber for each insn that will be restored in the epilogue,
13157 so that flow2 will get register lifetimes correct. */
13158 for (regno = 0; regno < 13; regno++)
13159 if (regs_ever_live[regno] && !call_used_regs[regno])
13160 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
13162 if (! regs_ever_live[LR_REGNUM])
13163 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
13166 static void
13167 thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
13169 int live_regs_mask = 0;
13170 int high_regs_pushed = 0;
13171 int cfa_offset = 0;
13172 int regno;
13174 if (IS_NAKED (arm_current_func_type ()))
13175 return;
13177 if (is_called_in_ARM_mode (current_function_decl))
13179 const char * name;
13181 if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
13182 abort ();
13183 if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
13184 abort ();
13185 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
13187 /* Generate code sequence to switch us into Thumb mode. */
13188 /* The .code 32 directive has already been emitted by
13189 ASM_DECLARE_FUNCTION_NAME. */
13190 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
13191 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
13193 /* Generate a label, so that the debugger will notice the
13194 change in instruction sets. This label is also used by
13195 the assembler to bypass the ARM code when this function
13196 is called from a Thumb encoded function elsewhere in the
13197 same file. Hence the definition of STUB_NAME here must
13198 agree with the definition in gas/config/tc-arm.c. */
13200 #define STUB_NAME ".real_start_of"
13202 fprintf (f, "\t.code\t16\n");
13203 #ifdef ARM_PE
13204 if (arm_dllexport_name_p (name))
13205 name = arm_strip_name_encoding (name);
13206 #endif
13207 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
13208 fprintf (f, "\t.thumb_func\n");
13209 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
13212 if (current_function_pretend_args_size)
13214 if (cfun->machine->uses_anonymous_args)
13216 int num_pushes;
13218 fprintf (f, "\tpush\t{");
13220 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
13222 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
13223 regno <= LAST_ARG_REGNUM;
13224 regno++)
13225 asm_fprintf (f, "%r%s", regno,
13226 regno == LAST_ARG_REGNUM ? "" : ", ");
13228 fprintf (f, "}\n");
13230 else
13231 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
13232 SP_REGNUM, SP_REGNUM,
13233 current_function_pretend_args_size);
13235 /* We don't need to record the stores for unwinding (would it
13236 help the debugger any if we did?), but record the change in
13237 the stack pointer. */
13238 if (dwarf2out_do_frame ())
13240 char *l = dwarf2out_cfi_label ();
13241 cfa_offset = cfa_offset + current_function_pretend_args_size;
13242 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13246 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
13247 if (THUMB_REG_PUSHED_P (regno))
13248 live_regs_mask |= 1 << regno;
13250 if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
13251 live_regs_mask |= 1 << LR_REGNUM;
13253 if (TARGET_BACKTRACE)
13255 int offset;
13256 int work_register = 0;
13257 int wr;
13259 /* We have been asked to create a stack backtrace structure.
13260 The code looks like this:
13262 0 .align 2
13263 0 func:
13264 0 sub SP, #16 Reserve space for 4 registers.
13265 2 push {R7} Get a work register.
13266 4 add R7, SP, #20 Get the stack pointer before the push.
13267 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13268 8 mov R7, PC Get hold of the start of this code plus 12.
13269 10 str R7, [SP, #16] Store it.
13270 12 mov R7, FP Get hold of the current frame pointer.
13271 14 str R7, [SP, #4] Store it.
13272 16 mov R7, LR Get hold of the current return address.
13273 18 str R7, [SP, #12] Store it.
13274 20 add R7, SP, #16 Point at the start of the backtrace structure.
13275 22 mov FP, R7 Put this value into the frame pointer. */
13277 if ((live_regs_mask & 0xFF) == 0)
13279 /* See if the a4 register is free. */
13281 if (regs_ever_live [LAST_ARG_REGNUM] == 0)
13282 work_register = LAST_ARG_REGNUM;
13283 else /* We must push a register of our own. */
13284 live_regs_mask |= (1 << LAST_LO_REGNUM);
13287 if (work_register == 0)
13289 /* Select a register from the list that will be pushed to
13290 use as our work register. */
13291 for (work_register = (LAST_LO_REGNUM + 1); work_register--;)
13292 if ((1 << work_register) & live_regs_mask)
13293 break;
13296 asm_fprintf
13297 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13298 SP_REGNUM, SP_REGNUM);
13300 if (dwarf2out_do_frame ())
13302 char *l = dwarf2out_cfi_label ();
13303 cfa_offset = cfa_offset + 16;
13304 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
13307 if (live_regs_mask)
13308 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13310 for (offset = 0, wr = 1 << 15; wr != 0; wr >>= 1)
13311 if (wr & live_regs_mask)
13312 offset += 4;
13314 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13315 offset + 16 + current_function_pretend_args_size);
13317 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13318 offset + 4);
13320 /* Make sure that the instruction fetching the PC is in the right place
13321 to calculate "start of backtrace creation code + 12". */
13322 if (live_regs_mask)
13324 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13325 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13326 offset + 12);
13327 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13328 ARM_HARD_FRAME_POINTER_REGNUM);
13329 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13330 offset);
13332 else
13334 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
13335 ARM_HARD_FRAME_POINTER_REGNUM);
13336 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13337 offset);
13338 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
13339 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13340 offset + 12);
13343 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
13344 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
13345 offset + 8);
13346 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
13347 offset + 12);
13348 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13349 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
13351 else if (live_regs_mask)
13352 thumb_pushpop (f, live_regs_mask, 1, &cfa_offset, live_regs_mask);
13354 for (regno = 8; regno < 13; regno++)
13355 if (THUMB_REG_PUSHED_P (regno))
13356 high_regs_pushed++;
13358 if (high_regs_pushed)
13360 int pushable_regs = 0;
13361 int mask = live_regs_mask & 0xff;
13362 int next_hi_reg;
13364 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
13365 if (THUMB_REG_PUSHED_P (next_hi_reg))
13366 break;
13368 pushable_regs = mask;
13370 if (pushable_regs == 0)
13372 /* Desperation time -- this probably will never happen. */
13373 if (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM))
13374 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13375 mask = 1 << LAST_ARG_REGNUM;
13378 while (high_regs_pushed > 0)
13380 int real_regs_mask = 0;
13382 for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
13384 if (mask & (1 << regno))
13386 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
13388 high_regs_pushed--;
13389 real_regs_mask |= (1 << next_hi_reg);
13391 if (high_regs_pushed)
13393 for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
13394 next_hi_reg--)
13395 if (THUMB_REG_PUSHED_P (next_hi_reg))
13396 break;
13398 else
13400 mask &= ~((1 << regno) - 1);
13401 break;
13406 thumb_pushpop (f, mask, 1, &cfa_offset, real_regs_mask);
13409 if (pushable_regs == 0
13410 && (THUMB_REG_PUSHED_P (LAST_ARG_REGNUM)))
13411 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
13415 /* Handle the case of a double word load into a low register from
13416 a computed memory address. The computed address may involve a
13417 register which is overwritten by the load. */
13418 const char *
13419 thumb_load_double_from_address (rtx *operands)
13421 rtx addr;
13422 rtx base;
13423 rtx offset;
13424 rtx arg1;
13425 rtx arg2;
13427 if (GET_CODE (operands[0]) != REG)
13428 abort ();
13430 if (GET_CODE (operands[1]) != MEM)
13431 abort ();
13433 /* Get the memory address. */
13434 addr = XEXP (operands[1], 0);
13436 /* Work out how the memory address is computed. */
13437 switch (GET_CODE (addr))
13439 case REG:
13440 operands[2] = gen_rtx_MEM (SImode,
13441 plus_constant (XEXP (operands[1], 0), 4));
13443 if (REGNO (operands[0]) == REGNO (addr))
13445 output_asm_insn ("ldr\t%H0, %2", operands);
13446 output_asm_insn ("ldr\t%0, %1", operands);
13448 else
13450 output_asm_insn ("ldr\t%0, %1", operands);
13451 output_asm_insn ("ldr\t%H0, %2", operands);
13453 break;
13455 case CONST:
13456 /* Compute <address> + 4 for the high order load. */
13457 operands[2] = gen_rtx_MEM (SImode,
13458 plus_constant (XEXP (operands[1], 0), 4));
13460 output_asm_insn ("ldr\t%0, %1", operands);
13461 output_asm_insn ("ldr\t%H0, %2", operands);
13462 break;
13464 case PLUS:
13465 arg1 = XEXP (addr, 0);
13466 arg2 = XEXP (addr, 1);
13468 if (CONSTANT_P (arg1))
13469 base = arg2, offset = arg1;
13470 else
13471 base = arg1, offset = arg2;
13473 if (GET_CODE (base) != REG)
13474 abort ();
13476 /* Catch the case of <address> = <reg> + <reg>. */
13477 if (GET_CODE (offset) == REG)
13479 int reg_offset = REGNO (offset);
13480 int reg_base = REGNO (base);
13481 int reg_dest = REGNO (operands[0]);
13483 /* Add the base and offset registers together into the
13484 higher destination register. */
13485 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
13486 reg_dest + 1, reg_base, reg_offset);
13488 /* Load the lower destination register from the address in
13489 the higher destination register. */
13490 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
13491 reg_dest, reg_dest + 1);
13493 /* Load the higher destination register from its own address
13494 plus 4. */
13495 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
13496 reg_dest + 1, reg_dest + 1);
13498 else
13500 /* Compute <address> + 4 for the high order load. */
13501 operands[2] = gen_rtx_MEM (SImode,
13502 plus_constant (XEXP (operands[1], 0), 4));
13504 /* If the computed address is held in the low order register
13505 then load the high order register first, otherwise always
13506 load the low order register first. */
13507 if (REGNO (operands[0]) == REGNO (base))
13509 output_asm_insn ("ldr\t%H0, %2", operands);
13510 output_asm_insn ("ldr\t%0, %1", operands);
13512 else
13514 output_asm_insn ("ldr\t%0, %1", operands);
13515 output_asm_insn ("ldr\t%H0, %2", operands);
13518 break;
13520 case LABEL_REF:
13521 /* With no registers to worry about we can just load the value
13522 directly. */
13523 operands[2] = gen_rtx_MEM (SImode,
13524 plus_constant (XEXP (operands[1], 0), 4));
13526 output_asm_insn ("ldr\t%H0, %2", operands);
13527 output_asm_insn ("ldr\t%0, %1", operands);
13528 break;
13530 default:
13531 abort ();
13532 break;
13535 return "";
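/* For example, with operands[0] == r2 and operands[1] == [r2, #8] the
   destination overlaps the base register, so the code above emits
	ldr	r3, [r2, #12]	@ high word first, base still intact
	ldr	r2, [r2, #8]	@ low word last, clobbering the base
   whereas with a non-overlapping destination the low word is loaded
   first.  */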
13538 const char *
13539 thumb_output_move_mem_multiple (int n, rtx *operands)
13541 rtx tmp;
13543 switch (n)
13545 case 2:
13546 if (REGNO (operands[4]) > REGNO (operands[5]))
13548 tmp = operands[4];
13549 operands[4] = operands[5];
13550 operands[5] = tmp;
13552 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
13553 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
13554 break;
13556 case 3:
13557 if (REGNO (operands[4]) > REGNO (operands[5]))
13559 tmp = operands[4];
13560 operands[4] = operands[5];
13561 operands[5] = tmp;
13563 if (REGNO (operands[5]) > REGNO (operands[6]))
13565 tmp = operands[5];
13566 operands[5] = operands[6];
13567 operands[6] = tmp;
13569 if (REGNO (operands[4]) > REGNO (operands[5]))
13571 tmp = operands[4];
13572 operands[4] = operands[5];
13573 operands[5] = tmp;
13576 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
13577 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
13578 break;
13580 default:
13581 abort ();
13584 return "";
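/* The compare-and-swap steps in the n == 3 case form a three-element
   sorting network: afterwards REGNO (operands[4]) < REGNO (operands[5])
   < REGNO (operands[6]), which ldmia/stmia require since register lists
   must be in ascending order.  E.g. scratch registers {r5, r3, r4}
   come out as {r3, r4, r5}.  */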
13587 /* Routines for generating rtl. */
13588 void
13589 thumb_expand_movstrqi (rtx *operands)
13591 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
13592 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
13593 HOST_WIDE_INT len = INTVAL (operands[2]);
13594 HOST_WIDE_INT offset = 0;
13596 while (len >= 12)
13598 emit_insn (gen_movmem12b (out, in, out, in));
13599 len -= 12;
13602 if (len >= 8)
13604 emit_insn (gen_movmem8b (out, in, out, in));
13605 len -= 8;
13608 if (len >= 4)
13610 rtx reg = gen_reg_rtx (SImode);
13611 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
13612 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
13613 len -= 4;
13614 offset += 4;
13617 if (len >= 2)
13619 rtx reg = gen_reg_rtx (HImode);
13620 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
13621 plus_constant (in, offset))));
13622 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
13623 reg));
13624 len -= 2;
13625 offset += 2;
13628 if (len)
13630 rtx reg = gen_reg_rtx (QImode);
13631 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
13632 plus_constant (in, offset))));
13633 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
13634 reg));
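/* A worked example: a 27 byte copy expands as two 12 byte ldmia/stmia
   blocks (which post-increment IN and OUT themselves), then the 3 byte
   tail as one halfword at offset 0 and one byte at offset 2 from the
   updated pointers.  */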
13638 int
13639 thumb_cmp_operand (rtx op, enum machine_mode mode)
13641 return ((GET_CODE (op) == CONST_INT
13642 && INTVAL (op) < 256
13643 && INTVAL (op) >= 0)
13644 || s_register_operand (op, mode));
13647 int
13648 thumb_cmpneg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
13650 return (GET_CODE (op) == CONST_INT
13651 && INTVAL (op) < 0
13652 && INTVAL (op) > -256);
13655 /* Return TRUE if a result can be stored in OP without clobbering the
13656 condition code register. Prior to reload we only accept a
13657 register. After reload we have to be able to handle memory as
13658 well, since a pseudo may not get a hard reg and reload cannot
13659 handle output-reloads on jump insns.
13661 We could possibly handle mem before reload as well, but that might
13662 complicate things with the need to handle increment
13663 side-effects. */
13665 int
13666 thumb_cbrch_target_operand (rtx op, enum machine_mode mode)
13668 return (s_register_operand (op, mode)
13669 || ((reload_in_progress || reload_completed)
13670 && memory_operand (op, mode)));
13673 /* Handle storing a half-word to memory during reload. */
13674 void
13675 thumb_reload_out_hi (rtx *operands)
13677 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
13680 /* Handle reading a half-word from memory during reload. */
13681 void
13682 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
13684 abort ();
13687 /* Return the length of a function name prefix
13688 that starts with the character 'c'. */
13689 static int
13690 arm_get_strip_length (int c)
13692 switch (c)
13694 ARM_NAME_ENCODING_LENGTHS
13695 default: return 0;
13699 /* Return a pointer to a function's name with any
13700 and all prefix encodings stripped from it. */
13701 const char *
13702 arm_strip_name_encoding (const char *name)
13704 int skip;
13706 while ((skip = arm_get_strip_length (* name)))
13707 name += skip;
13709 return name;
/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
rtx aof_pic_label;

#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    {
      aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
    }

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
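/* Each distinct symbol thus gets a 4-byte slot in the x$adcons pool:
   the first symbol entered is addressed as x$adcons + 0, the second
   as x$adcons + 4, and so on; repeated requests for the same symbol
   return the offset of its existing slot.  */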
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}
int arm_text_section_count = 1;

char *
aof_text_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}
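/* Since the counter starts at 1, the first text section of a PIC
   compilation is announced as:

	AREA |C$$code1|, CODE, READONLY, PIC, REENTRANT  */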
static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}
void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}
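/* Typical lifecycle: a reference to an as-yet-undefined function adds
   its name via aof_add_import; if the function body is later compiled
   in this file, aof_delete_import removes the entry again, so only
   genuinely external symbols survive to be dumped as IMPORT
   directives at the end of compilation.  */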
int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      text_section ();
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}
static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  text_section ();
}
static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
#ifdef OBJECT_FORMAT_ELF
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.

   Differs from the default elf version only in the prefix character
   used before the section type.  */

static void
arm_elf_asm_named_section (const char *name, unsigned int flags)
{
  char flagchars[10], *f = flagchars;

  if (! named_section_first_declaration (name))
    {
      fprintf (asm_out_file, "\t.section\t%s\n", name);
      return;
    }

  if (!(flags & SECTION_DEBUG))
    *f++ = 'a';
  if (flags & SECTION_WRITE)
    *f++ = 'w';
  if (flags & SECTION_CODE)
    *f++ = 'x';
  if (flags & SECTION_SMALL)
    *f++ = 's';
  if (flags & SECTION_MERGE)
    *f++ = 'M';
  if (flags & SECTION_STRINGS)
    *f++ = 'S';
  if (flags & SECTION_TLS)
    *f++ = 'T';
  *f = '\0';

  fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars);

  if (!(flags & SECTION_NOTYPE))
    {
      const char *type;

      if (flags & SECTION_BSS)
	type = "nobits";
      else
	type = "progbits";

      fprintf (asm_out_file, ",%%%s", type);

      if (flags & SECTION_ENTSIZE)
	fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE);
    }

  putc ('\n', asm_out_file);
}
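/* For example, first declaring a mergeable string section such as
   .rodata.str1.4 (SECTION_MERGE | SECTION_STRINGS, entity size 1)
   would emit something like:

	.section	.rodata.str1.4,"aMS",%progbits,1

   Note the '%' type prefix in place of the '@' used by the default
   ELF hook.  */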
#endif
#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd')
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);
  if (mi_delta < 0)
    mi_delta = - mi_delta;
  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
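/* For instance, in ARM mode a thunk with DELTA = 4 for a function
   whose `this' pointer lives in r0 comes out roughly as:

	add	r0, r0, #4
	b	target(PLT)

   (the PLT marker appears only when PLT relocations are needed).
   Larger deltas are split by the loop above into several add/sub
   instructions, each carrying an 8-bit immediate shifted into
   place.  */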
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  if (GET_CODE (x) != CONST_VECTOR)
    abort ();

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       abort ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}
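/* Elements are printed from the highest-numbered lane down, so a
   V4HImode constant whose lanes are {1, 2, 3, 4} (lane 0 first) is
   emitted as 0x0004000300020001.  */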
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
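/* The out-of-range path thus spills the address base register to the
   stack, reuses it as a scratch to load the value with an ordinary
   ldr, transfers the value to the wCGR register with tmcr, and
   finally restores the base register from the stack.  */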
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
#if 0
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
  return 0;
#else
  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
#endif
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
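/* Example: for a variadic function whose named arguments consume r0
   and r1, cum->nregs is 2, so PRETEND_SIZE becomes 2 * UNITS_PER_WORD
   and the prologue pushes the remaining argument registers (r2 and
   r3) where the anonymous arguments can be found on the stack.  */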
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
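/* This and the following scheduling predicates all follow the same
   pattern: strip any COND_EXEC wrapper and take the first element of
   a PARALLEL to reach the underlying SET, then test whether the
   producer's destination register overlaps the part of the consumer
   that is needed early in the pipeline.  */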
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));